aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_pad.c34
-rw-r--r--drivers/acpi/acpica/aclocal.h1
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/apei/Kconfig2
-rw-r--r--drivers/acpi/apei/apei-base.c21
-rw-r--r--drivers/acpi/apei/einj.c4
-rw-r--r--drivers/acpi/apei/erst-dbg.c18
-rw-r--r--drivers/acpi/apei/erst.c29
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/apei/hest.c11
-rw-r--r--drivers/acpi/atomicio.c2
-rw-r--r--drivers/acpi/battery.c1
-rw-r--r--drivers/acpi/blacklist.c18
-rw-r--r--drivers/acpi/bus.c18
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/processor_perflib.c4
-rw-r--r--drivers/acpi/sleep.c22
-rw-r--r--drivers/acpi/sysfs.c20
-rw-r--r--drivers/acpi/video_detect.c4
-rw-r--r--drivers/ata/ahci.c7
-rw-r--r--drivers/ata/ahci.h12
-rw-r--r--drivers/ata/ahci_platform.c6
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libahci.c18
-rw-r--r--drivers/ata/libata-core.c14
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/libata-sff.c41
-rw-r--r--drivers/ata/pata_artop.c3
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/atm/Makefile2
-rw-r--r--drivers/atm/horizon.c6
-rw-r--r--drivers/atm/idt77252.c6
-rw-r--r--drivers/atm/iphase.c6
-rw-r--r--drivers/atm/iphase.h2
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/block/cciss.c13
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/mg_disk.c3
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/bluetooth/btmrvl_main.c4
-rw-r--r--drivers/bluetooth/btsdio.c8
-rw-r--r--drivers/bluetooth/btusb.c6
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-agp.h2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c17
-rw-r--r--drivers/char/mem.c3
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/char/vt_ioctl.c16
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dca/dca-core.c85
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/shdma.c3
-rw-r--r--drivers/edac/edac_mc.c3
-rw-r--r--drivers/edac/i7core_edac.c1
-rw-r--r--drivers/firewire/ohci.c1
-rw-r--r--drivers/gpio/sx150x.c26
-rw-r--r--drivers/gpu/drm/drm_buffer.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem.c39
-rw-r--r--drivers/gpu/drm/drm_info.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_platform.c5
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c28
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c89
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c44
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h8
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c10
-rw-r--r--drivers/gpu/drm/i915/intel_display.c93
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c21
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c9
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c20
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c27
-rw-r--r--drivers/gpu/drm/radeon/r100.c24
-rw-r--r--drivers/gpu/drm/radeon/r600.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h24
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-mosart.c1
-rw-r--r--drivers/hid/hid-topseed.c1
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c4
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/adm1031.c43
-rw-r--r--drivers/hwmon/coretemp.c57
-rw-r--r--drivers/hwmon/emc1403.c1
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c6
-rw-r--r--drivers/hwmon/hp_accel.c2
-rw-r--r--drivers/hwmon/lis3lv02d.c4
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c4
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c4
-rw-r--r--drivers/hwmon/lm95241.c21
-rw-r--r--drivers/hwmon/pkgtemp.c23
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c6
-rw-r--r--drivers/i2c/busses/i2c-octeon.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/ide/ide-probe.c12
-rw-r--r--[-rwxr-xr-x]drivers/idle/intel_idle.c20
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c22
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c14
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c4
-rw-r--r--drivers/input/input.c11
-rw-r--r--drivers/input/mouse/bcm5974.c12
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c4
-rw-r--r--drivers/isdn/capi/kcapi.c19
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c400
-rw-r--r--drivers/isdn/gigaset/common.c26
-rw-r--r--drivers/isdn/gigaset/gigaset.h3
-rw-r--r--drivers/isdn/gigaset/i4l.c2
-rw-r--r--drivers/isdn/gigaset/isocdata.c8
-rw-r--r--drivers/isdn/hardware/eicon/debug.c2
-rw-r--r--drivers/isdn/hardware/eicon/debuglib.h2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c11
-rw-r--r--drivers/isdn/mISDN/stack.c7
-rw-r--r--drivers/isdn/pcbit/edss1.c2
-rw-r--r--drivers/isdn/pcbit/edss1.h2
-rw-r--r--drivers/isdn/sc/interrupt.c18
-rw-r--r--drivers/leds/leds-ns2.c9
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/mfd/max8925-core.c13
-rw-r--r--drivers/mfd/wm831x-irq.c9
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/vmw_balloon.c (renamed from drivers/misc/vmware_balloon.c)0
-rw-r--r--drivers/mmc/core/sdio.c5
-rw-r--r--drivers/mmc/host/at91_mci.c1
-rw-r--r--drivers/mmc/host/imxmmc.c3
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c12
-rw-r--r--drivers/mmc/host/tmio_mmc.c7
-rw-r--r--drivers/mmc/host/tmio_mmc.h13
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c9
-rw-r--r--drivers/mtd/nand/mxc_nand.c47
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/onenand/samsung.c16
-rw-r--r--drivers/net/3c515.c4
-rw-r--r--drivers/net/3c523.c4
-rw-r--r--drivers/net/3c527.c2
-rw-r--r--drivers/net/3c59x.c17
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/Kconfig30
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/amd8111e.c2
-rw-r--r--drivers/net/appletalk/ipddp.c10
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/atarilance.c24
-rw-r--r--drivers/net/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/atlx/atl1.c17
-rw-r--r--drivers/net/atlx/atl2.c2
-rw-r--r--drivers/net/ax88796.c1
-rw-r--r--drivers/net/b44.c19
-rw-r--r--drivers/net/benet/be.h86
-rw-r--r--drivers/net/benet/be_cmds.c103
-rw-r--r--drivers/net/benet/be_cmds.h65
-rw-r--r--drivers/net/benet/be_ethtool.c174
-rw-r--r--drivers/net/benet/be_main.c584
-rw-r--r--drivers/net/bmac.c2
-rw-r--r--drivers/net/bna/bfa_ioc.c10
-rw-r--r--drivers/net/bna/bfa_ioc.h1
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c2
-rw-r--r--drivers/net/bna/bfa_sm.h2
-rw-r--r--drivers/net/bna/bna.h108
-rw-r--r--drivers/net/bna/bna_ctrl.c565
-rw-r--r--drivers/net/bna/bna_hw.h1
-rw-r--r--drivers/net/bna/bna_txrx.c149
-rw-r--r--drivers/net/bna/bnad.c79
-rw-r--r--drivers/net/bna/bnad.h1
-rw-r--r--drivers/net/bna/cna_fwimg.c2
-rw-r--r--drivers/net/bnx2.c229
-rw-r--r--drivers/net/bnx2.h25
-rw-r--r--drivers/net/bnx2x/bnx2x.h701
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c991
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h581
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h35
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c201
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h819
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_file_hdr.h1
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h1648
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h44
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h366
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c382
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c5315
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h885
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c298
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h8
-rw-r--r--drivers/net/bonding/bond_3ad.c279
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_main.c190
-rw-r--r--drivers/net/bonding/bond_sysfs.c52
-rw-r--r--drivers/net/bonding/bonding.h34
-rw-r--r--drivers/net/bsd_comp.c2
-rw-r--r--drivers/net/can/mcp251x.c103
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/chelsio/sge.c4
-rw-r--r--drivers/net/chelsio/vsc7326.c2
-rw-r--r--drivers/net/cnic.c953
-rw-r--r--drivers/net/cnic.h118
-rw-r--r--drivers/net/cnic_defs.h456
-rw-r--r--drivers/net/cnic_if.h23
-rw-r--r--drivers/net/cxgb3/adapter.h3
-rw-r--r--drivers/net/cxgb3/common.h18
-rw-r--r--drivers/net/cxgb3/cxgb3_defs.h3
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c7
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c11
-rw-r--r--drivers/net/cxgb3/mc5.c38
-rw-r--r--drivers/net/cxgb3/sge.c43
-rw-r--r--drivers/net/cxgb3/t3_hw.c197
-rw-r--r--drivers/net/cxgb4/cxgb4.h15
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c75
-rw-r--r--drivers/net/cxgb4/cxgb4_uld.h6
-rw-r--r--drivers/net/cxgb4/l2t.c34
-rw-r--r--drivers/net/cxgb4/l2t.h3
-rw-r--r--drivers/net/cxgb4/t4_hw.c332
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c5
-rw-r--r--drivers/net/cxgb4vf/t4vf_common.h26
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/defxx.c66
-rw-r--r--drivers/net/dnet.c18
-rw-r--r--drivers/net/dummy.c58
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000.h3
-rw-r--r--drivers/net/e1000/e1000_main.c74
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h29
-rw-r--r--drivers/net/e1000e/es2lan.c1
-rw-r--r--drivers/net/e1000e/ethtool.c23
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c199
-rw-r--r--drivers/net/e1000e/netdev.c142
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c43
-rw-r--r--drivers/net/enic/enic.h27
-rw-r--r--drivers/net/enic/enic_main.c398
-rw-r--r--drivers/net/enic/enic_res.c32
-rw-r--r--drivers/net/enic/enic_res.h2
-rw-r--r--drivers/net/enic/vnic_dev.c108
-rw-r--r--drivers/net/enic/vnic_dev.h19
-rw-r--r--drivers/net/enic/vnic_intr.c5
-rw-r--r--drivers/net/enic/vnic_rq.c2
-rw-r--r--drivers/net/enic/vnic_rq.h6
-rw-r--r--drivers/net/enic/vnic_rss.h5
-rw-r--r--drivers/net/enic/vnic_wq.c2
-rw-r--r--drivers/net/enic/vnic_wq.h4
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/eth16i.c16
-rw-r--r--drivers/net/fec.c3
-rw-r--r--drivers/net/forcedeth.c13
-rw-r--r--drivers/net/fsl_pq_mdio.c4
-rw-r--r--drivers/net/gianfar.c7
-rw-r--r--drivers/net/gianfar_ethtool.c4
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hp100.c6
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ibm_newemac/core.h6
-rw-r--r--drivers/net/ibmveth.c46
-rw-r--r--drivers/net/igb/e1000_82575.c18
-rw-r--r--drivers/net/igb/e1000_defines.h31
-rw-r--r--drivers/net/igb/e1000_hw.h2
-rw-r--r--drivers/net/igb/e1000_phy.c206
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c52
-rw-r--r--drivers/net/igb/igb_main.c147
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c9
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/irda-usb.c10
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/sir_dev.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c2
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.h2
-rw-r--r--drivers/net/ixgb/ixgb_ee.c32
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c2
-rw-r--r--drivers/net/ixgb/ixgb_hw.c14
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ixgbe/ixgbe.h9
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c234
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c50
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c219
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h18
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c67
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h15
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c69
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h18
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c47
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c259
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c21
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h10
-rw-r--r--drivers/net/ixgbevf/ethtool.c153
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ixgbevf/mbx.c2
-rw-r--r--drivers/net/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ixgbevf/vf.c2
-rw-r--r--drivers/net/jme.c67
-rw-r--r--drivers/net/jme.h3
-rw-r--r--drivers/net/ll_temac_main.c3
-rw-r--r--drivers/net/ll_temac_mdio.c1
-rw-r--r--drivers/net/loopback.c28
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/macvlan.c4
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c3
-rw-r--r--drivers/net/mlx4/en_selftest.c2
-rw-r--r--drivers/net/mlx4/en_tx.c4
-rw-r--r--drivers/net/mv643xx_eth.c3
-rw-r--r--drivers/net/myri10ge/myri10ge.c45
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/netconsole.c9
-rw-r--r--drivers/net/netxen/netxen_nic.h23
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c43
-rw-r--r--drivers/net/netxen/netxen_nic_init.c17
-rw-r--r--drivers/net/netxen/netxen_nic_main.c49
-rw-r--r--drivers/net/niu.c127
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/pch_gbe/Makefile4
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h659
-rw-r--r--drivers/net/pch_gbe/pch_gbe_api.c245
-rw-r--r--drivers/net/pch_gbe/pch_gbe_api.h36
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c585
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c2477
-rw-r--r--drivers/net/pch_gbe/pch_gbe_param.c499
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.c274
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.h37
-rw-r--r--drivers/net/pcmcia/3c589_cs.c2
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c139
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c2
-rw-r--r--drivers/net/pcnet32.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/bcm63xx.c2
-rw-r--r--drivers/net/phy/broadcom.c2
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/davicom.c2
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/icplus.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c33
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c2
-rw-r--r--drivers/net/phy/smsc.c2
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/vitesse.c2
-rw-r--r--drivers/net/plip.c8
-rw-r--r--drivers/net/ppp_generic.c55
-rw-r--r--drivers/net/pppoe.c2
-rw-r--r--drivers/net/pppox.c4
-rw-r--r--drivers/net/pptp.c8
-rw-r--r--drivers/net/ps3_gelic_wireless.c6
-rw-r--r--drivers/net/pxa168_eth.c1
-rw-r--r--drivers/net/qlcnic/qlcnic.h80
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c212
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c157
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h24
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c58
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c120
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c222
-rw-r--r--drivers/net/qlge/qlge_main.c2
-rw-r--r--drivers/net/r6040.c3
-rw-r--r--drivers/net/r8169.c408
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sfc/Makefile7
-rw-r--r--drivers/net/sfc/efx.c53
-rw-r--r--drivers/net/sfc/efx.h24
-rw-r--r--drivers/net/sfc/ethtool.c128
-rw-r--r--drivers/net/sfc/falcon.c122
-rw-r--r--drivers/net/sfc/falcon_boards.c203
-rw-r--r--drivers/net/sfc/falcon_gmac.c230
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/filter.c454
-rw-r--r--drivers/net/sfc/filter.h189
-rw-r--r--drivers/net/sfc/mac.h2
-rw-r--r--drivers/net/sfc/mcdi.c4
-rw-r--r--drivers/net/sfc/mcdi.h2
-rw-r--r--drivers/net/sfc/mcdi_phy.c3
-rw-r--r--drivers/net/sfc/mdio_10g.c30
-rw-r--r--drivers/net/sfc/net_driver.h14
-rw-r--r--drivers/net/sfc/nic.c17
-rw-r--r--drivers/net/sfc/phy.h18
-rw-r--r--drivers/net/sfc/regs.h14
-rw-r--r--drivers/net/sfc/selftest.c10
-rw-r--r--drivers/net/sfc/siena.c4
-rw-r--r--drivers/net/sfc/tenxpress.c424
-rw-r--r--drivers/net/sfc/txc43128_phy.c560
-rw-r--r--drivers/net/sfc/workarounds.h9
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sis900.c6
-rw-r--r--drivers/net/skfp/cfm.c10
-rw-r--r--drivers/net/skfp/drvfbi.c16
-rw-r--r--drivers/net/skfp/ess.c46
-rw-r--r--drivers/net/skfp/fplustm.c24
-rw-r--r--drivers/net/skfp/hwmtm.c30
-rw-r--r--drivers/net/skfp/hwt.c4
-rw-r--r--drivers/net/skfp/pcmplc.c22
-rw-r--r--drivers/net/skfp/pmf.c62
-rw-r--r--drivers/net/skfp/queue.c2
-rw-r--r--drivers/net/skfp/skfddi.c32
-rw-r--r--drivers/net/skfp/smt.c78
-rw-r--r--drivers/net/skfp/smtdef.c4
-rw-r--r--drivers/net/skfp/smtinit.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c18
-rw-r--r--drivers/net/sky2.c5
-rw-r--r--drivers/net/slip.c2
-rw-r--r--drivers/net/smsc911x.c1
-rw-r--r--drivers/net/stmmac/Kconfig2
-rw-r--r--drivers/net/stmmac/common.h11
-rw-r--r--drivers/net/stmmac/dwmac100.h2
-rw-r--r--drivers/net/stmmac/dwmac1000.h4
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c16
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c12
-rw-r--r--drivers/net/stmmac/dwmac100_core.c9
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c12
-rw-r--r--drivers/net/stmmac/enh_desc.c4
-rw-r--r--drivers/net/stmmac/norm_desc.c19
-rw-r--r--drivers/net/stmmac/stmmac.h9
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c42
-rw-r--r--drivers/net/stmmac/stmmac_main.c93
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c5
-rw-r--r--drivers/net/sun3lance.c4
-rw-r--r--drivers/net/sundance.c225
-rw-r--r--drivers/net/sungem_phy.c2
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/sunqe.c2
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tg3.c732
-rw-r--r--drivers/net/tg3.h73
-rw-r--r--drivers/net/tlan.c2
-rw-r--r--drivers/net/tlan.h8
-rw-r--r--drivers/net/tokenring/proteon.c2
-rw-r--r--drivers/net/tokenring/smctr.c500
-rw-r--r--drivers/net/tokenring/tms380tr.c58
-rw-r--r--drivers/net/tokenring/tmspci.c10
-rw-r--r--drivers/net/tsi108_eth.c2
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/de2104x.c46
-rw-r--r--drivers/net/tulip/de4x5.c39
-rw-r--r--drivers/net/tulip/uli526x.c2
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/via-velocity.c84
-rw-r--r--drivers/net/via-velocity.h16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c19
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h17
-rw-r--r--drivers/net/vxge/vxge-main.c4
-rw-r--r--drivers/net/wan/dlci.c42
-rw-r--r--drivers/net/wan/hdlc_cisco.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c4
-rw-r--r--drivers/net/wan/n2.c4
-rw-r--r--drivers/net/wan/pc300_drv.c18
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c108
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/control.c18
-rw-r--r--drivers/net/wimax/i2400m/driver.c2
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h1
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h9
-rw-r--r--drivers/net/wimax/i2400m/rx.c28
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c2
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/airo.c19
-rw-r--r--drivers/net/wireless/at76c50x-usb.c3
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile4
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath.h55
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c41
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h5
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c15
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c2301
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h33
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c116
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h13
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c277
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c99
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h71
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c655
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c89
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c37
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c164
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h77
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c43
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c270
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h33
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c418
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h115
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c74
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c291
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c200
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h37
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c583
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h48
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c72
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c166
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig41
-rw-r--r--drivers/net/wireless/ath/carl9170/Makefile4
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h628
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c188
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h168
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c902
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.h134
-rw-r--r--drivers/net/wireless/ath/carl9170/eeprom.h216
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c402
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h284
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h241
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h739
-rw-r--r--drivers/net/wireless/ath/carl9170/led.c190
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c604
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c1891
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c1810
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.h564
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c938
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c1335
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c1136
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h7
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h420
-rw-r--r--drivers/net/wireless/ath/debug.c29
-rw-r--r--drivers/net/wireless/ath/debug.h10
-rw-r--r--drivers/net/wireless/ath/hw.c59
-rw-r--r--drivers/net/wireless/ath/key.c568
-rw-r--r--drivers/net/wireless/ath/reg.h34
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h3
-rw-r--r--drivers/net/wireless/b43/phy_common.c6
-rw-r--r--drivers/net/wireless/b43/phy_common.h5
-rw-r--r--drivers/net/wireless/b43/phy_n.c147
-rw-r--r--drivers/net/wireless/b43/phy_n.h218
-rw-r--r--drivers/net/wireless/b43/radio_2055.c1332
-rw-r--r--drivers/net/wireless/b43/radio_2055.h254
-rw-r--r--drivers/net/wireless/b43/radio_2056.c43
-rw-r--r--drivers/net/wireless/b43/radio_2056.h42
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c1311
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h59
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c8
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c110
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c117
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c75
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c655
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-calib.h)4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c515
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c454
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c725
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c80
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c716
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c59
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c227
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h69
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c671
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h191
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c107
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c393
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c390
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c728
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c214
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c7
-rw-r--r--drivers/net/wireless/libertas/cfg.c10
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c15
-rw-r--r--drivers/net/wireless/p54/eeprom.c4
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/p54/p54usb.c13
-rw-r--r--drivers/net/wireless/ray_cs.c16
-rw-r--r--drivers/net/wireless/rndis_wlan.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c116
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c117
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h73
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c334
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c193
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c125
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c21
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c54
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c55
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c128
-rw-r--r--drivers/net/wireless/wl1251/Kconfig33
-rw-r--r--drivers/net/wireless/wl1251/Makefile6
-rw-r--r--drivers/net/wireless/wl1251/acx.c (renamed from drivers/net/wireless/wl12xx/wl1251_acx.c)8
-rw-r--r--drivers/net/wireless/wl1251/acx.h (renamed from drivers/net/wireless/wl12xx/wl1251_acx.h)2
-rw-r--r--drivers/net/wireless/wl1251/boot.c (renamed from drivers/net/wireless/wl12xx/wl1251_boot.c)12
-rw-r--r--drivers/net/wireless/wl1251/boot.h (renamed from drivers/net/wireless/wl12xx/wl1251_boot.h)0
-rw-r--r--drivers/net/wireless/wl1251/cmd.c (renamed from drivers/net/wireless/wl12xx/wl1251_cmd.c)10
-rw-r--r--drivers/net/wireless/wl1251/cmd.h (renamed from drivers/net/wireless/wl12xx/wl1251_cmd.h)0
-rw-r--r--drivers/net/wireless/wl1251/debugfs.c (renamed from drivers/net/wireless/wl12xx/wl1251_debugfs.c)6
-rw-r--r--drivers/net/wireless/wl1251/debugfs.h (renamed from drivers/net/wireless/wl12xx/wl1251_debugfs.h)0
-rw-r--r--drivers/net/wireless/wl1251/event.c (renamed from drivers/net/wireless/wl12xx/wl1251_event.c)8
-rw-r--r--drivers/net/wireless/wl1251/event.h (renamed from drivers/net/wireless/wl12xx/wl1251_event.h)0
-rw-r--r--drivers/net/wireless/wl1251/init.c (renamed from drivers/net/wireless/wl12xx/wl1251_init.c)8
-rw-r--r--drivers/net/wireless/wl1251/init.h (renamed from drivers/net/wireless/wl12xx/wl1251_init.h)0
-rw-r--r--drivers/net/wireless/wl1251/io.c (renamed from drivers/net/wireless/wl12xx/wl1251_io.c)4
-rw-r--r--drivers/net/wireless/wl1251/io.h (renamed from drivers/net/wireless/wl12xx/wl1251_io.h)0
-rw-r--r--drivers/net/wireless/wl1251/main.c (renamed from drivers/net/wireless/wl12xx/wl1251_main.c)20
-rw-r--r--drivers/net/wireless/wl1251/ps.c (renamed from drivers/net/wireless/wl12xx/wl1251_ps.c)8
-rw-r--r--drivers/net/wireless/wl1251/ps.h (renamed from drivers/net/wireless/wl12xx/wl1251_ps.h)2
-rw-r--r--drivers/net/wireless/wl1251/reg.h (renamed from drivers/net/wireless/wl12xx/wl1251_reg.h)0
-rw-r--r--drivers/net/wireless/wl1251/rx.c (renamed from drivers/net/wireless/wl12xx/wl1251_rx.c)10
-rw-r--r--drivers/net/wireless/wl1251/rx.h (renamed from drivers/net/wireless/wl12xx/wl1251_rx.h)0
-rw-r--r--drivers/net/wireless/wl1251/sdio.c (renamed from drivers/net/wireless/wl12xx/wl1251_sdio.c)2
-rw-r--r--drivers/net/wireless/wl1251/spi.c (renamed from drivers/net/wireless/wl12xx/wl1251_spi.c)6
-rw-r--r--drivers/net/wireless/wl1251/spi.h (renamed from drivers/net/wireless/wl12xx/wl1251_spi.h)6
-rw-r--r--drivers/net/wireless/wl1251/tx.c (renamed from drivers/net/wireless/wl12xx/wl1251_tx.c)8
-rw-r--r--drivers/net/wireless/wl1251/tx.h (renamed from drivers/net/wireless/wl12xx/wl1251_tx.h)2
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h (renamed from drivers/net/wireless/wl12xx/wl1251.h)0
-rw-r--r--drivers/net/wireless/wl1251/wl12xx_80211.h156
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig39
-rw-r--r--drivers/net/wireless/wl12xx/Makefile12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h32
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c34
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h31
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c143
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h73
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h78
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c39
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h9
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c401
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c20
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_scan.c79
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_scan.h6
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c98
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c151
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c14
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c105
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c28
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/oprofile/buffer_sync.c27
-rw-r--r--drivers/oprofile/cpu_buffer.c2
-rw-r--r--drivers/pci/intel-iommu.c117
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/pcmcia/pcmcia_resource.c57
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/power/apm_power.c1
-rw-r--r--drivers/power/intel_mid_battery.c6
-rw-r--r--drivers/regulator/88pm8607.c4
-rw-r--r--drivers/regulator/ab3100.c5
-rw-r--r--drivers/regulator/ab8500.c9
-rw-r--r--drivers/regulator/ad5398.c12
-rw-r--r--drivers/regulator/core.c6
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/max1586.c12
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8998.c8
-rw-r--r--drivers/regulator/tps6507x-regulator.c6
-rw-r--r--drivers/regulator/tps6586x-regulator.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c7
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-bfin.c15
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-s3c.c13
-rw-r--r--drivers/s390/char/tape_block.c3
-rw-r--r--drivers/s390/net/ctcm_main.c4
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c33
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c5
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/constants.c6
-rw-r--r--drivers/scsi/hpsa.c6
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c30
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/sd.c8
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c10
-rw-r--r--drivers/serial/amba-pl010.c9
-rw-r--r--drivers/serial/mfd.c18
-rw-r--r--drivers/serial/mpc52xx_uart.c1
-rw-r--r--drivers/serial/mrst_max3110.c1
-rw-r--r--drivers/serial/serial_cs.c62
-rw-r--r--drivers/spi/amba-pl022.c16
-rw-r--r--drivers/spi/dw_spi.c24
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spi/spi_gpio.c2
-rw-r--r--drivers/spi/spi_mpc8xxx.c10
-rw-r--r--drivers/spi/spi_s3c64xx.c37
-rw-r--r--drivers/staging/batman-adv/hard-interface.c13
-rw-r--r--drivers/staging/batman-adv/send.c8
-rw-r--r--drivers/staging/ti-st/st.h1
-rw-r--r--drivers/staging/ti-st/st_core.c9
-rw-r--r--drivers/staging/ti-st/st_core.h2
-rw-r--r--drivers/staging/ti-st/st_kim.c22
-rw-r--r--drivers/staging/vt6655/wpactl.c11
-rw-r--r--drivers/usb/core/Kconfig6
-rw-r--r--drivers/usb/core/file.c35
-rw-r--r--drivers/usb/core/message.c1
-rw-r--r--drivers/usb/host/ehci-pci.c5
-rw-r--r--drivers/usb/musb/cppi_dma.c1
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c75
-rw-r--r--drivers/usb/musb/musb_gadget.h2
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c9
-rw-r--r--drivers/usb/musb/musb_host.c6
-rw-r--r--drivers/usb/otg/twl4030-usb.c78
-rw-r--r--drivers/usb/serial/mos7720.c3
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c56
-rw-r--r--drivers/vhost/vhost.h18
-rw-r--r--drivers/video/console/fbcon.c5
-rw-r--r--drivers/video/efifb.c103
-rw-r--r--drivers/video/pxa168fb.c10
-rw-r--r--drivers/video/sis/sis_main.c3
-rw-r--r--drivers/video/via/ioctl.c2
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/sb_wdog.c12
-rw-r--r--drivers/watchdog/ts72xx_wdt.c3
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
870 files changed, 48908 insertions, 24593 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index ae473445ad6d..a2aea53a75ed 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,7 @@ obj-$(CONFIG_SPI) += spi/
50obj-y += net/ 50obj-y += net/
51obj-$(CONFIG_ATM) += atm/ 51obj-$(CONFIG_ATM) += atm/
52obj-$(CONFIG_FUSION) += message/ 52obj-$(CONFIG_FUSION) += message/
53obj-$(CONFIG_FIREWIRE) += firewire/ 53obj-y += firewire/
54obj-y += ieee1394/ 54obj-y += ieee1394/
55obj-$(CONFIG_UIO) += uio/ 55obj-$(CONFIG_UIO) += uio/
56obj-y += cdrom/ 56obj-y += cdrom/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b811f2173f6f..88681aca88c5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS
105 105
106 Be aware that using this interface can confuse your Embedded 106 Be aware that using this interface can confuse your Embedded
107 Controller in a way that a normal reboot is not enough. You then 107 Controller in a way that a normal reboot is not enough. You then
108 have to power of your system, and remove the laptop battery for 108 have to power off your system, and remove the laptop battery for
109 some seconds. 109 some seconds.
110 An Embedded Controller typically is available on laptops and reads 110 An Embedded Controller typically is available on laptops and reads
111 sensor values like battery state and temperature. 111 sensor values like battery state and temperature.
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index b76848c80be3..6b115f6c4313 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -382,31 +382,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
382 device_remove_file(&device->dev, &dev_attr_rrtime); 382 device_remove_file(&device->dev, &dev_attr_rrtime);
383} 383}
384 384
385/* Query firmware how many CPUs should be idle */ 385/*
386static int acpi_pad_pur(acpi_handle handle, int *num_cpus) 386 * Query firmware how many CPUs should be idle
387 * return -1 on failure
388 */
389static int acpi_pad_pur(acpi_handle handle)
387{ 390{
388 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 391 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
389 union acpi_object *package; 392 union acpi_object *package;
390 int rev, num, ret = -EINVAL; 393 int num = -1;
391 394
392 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) 395 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
393 return -EINVAL; 396 return num;
394 397
395 if (!buffer.length || !buffer.pointer) 398 if (!buffer.length || !buffer.pointer)
396 return -EINVAL; 399 return num;
397 400
398 package = buffer.pointer; 401 package = buffer.pointer;
399 if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) 402
400 goto out; 403 if (package->type == ACPI_TYPE_PACKAGE &&
401 rev = package->package.elements[0].integer.value; 404 package->package.count == 2 &&
402 num = package->package.elements[1].integer.value; 405 package->package.elements[0].integer.value == 1) /* rev 1 */
403 if (rev != 1 || num < 0) 406
404 goto out; 407 num = package->package.elements[1].integer.value;
405 *num_cpus = num; 408
406 ret = 0;
407out:
408 kfree(buffer.pointer); 409 kfree(buffer.pointer);
409 return ret; 410 return num;
410} 411}
411 412
412/* Notify firmware how many CPUs are idle */ 413/* Notify firmware how many CPUs are idle */
@@ -433,7 +434,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
433 uint32_t idle_cpus; 434 uint32_t idle_cpus;
434 435
435 mutex_lock(&isolated_cpus_lock); 436 mutex_lock(&isolated_cpus_lock);
436 if (acpi_pad_pur(handle, &num_cpus)) { 437 num_cpus = acpi_pad_pur(handle);
438 if (num_cpus < 0) {
437 mutex_unlock(&isolated_cpus_lock); 439 mutex_unlock(&isolated_cpus_lock);
438 return; 440 return;
439 } 441 }
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index df85b53a674f..7dad9160f209 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -854,6 +854,7 @@ struct acpi_bit_register_info {
854 ACPI_BITMASK_POWER_BUTTON_STATUS | \ 854 ACPI_BITMASK_POWER_BUTTON_STATUS | \
855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ 855 ACPI_BITMASK_SLEEP_BUTTON_STATUS | \
856 ACPI_BITMASK_RT_CLOCK_STATUS | \ 856 ACPI_BITMASK_RT_CLOCK_STATUS | \
857 ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \
857 ACPI_BITMASK_WAKE_STATUS) 858 ACPI_BITMASK_WAKE_STATUS)
858 859
859#define ACPI_BITMASK_TIMER_ENABLE 0x0001 860#define ACPI_BITMASK_TIMER_ENABLE 0x0001
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 74c24d517f81..4093522eed45 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
109 * 109 *
110 * DESCRIPTION: Reacquire the interpreter execution region from within the 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a 111 * interpreter code. Failure to enter the interpreter region is a
112 * fatal system error. Used in conjuction with 112 * fatal system error. Used in conjunction with
113 * relinquish_interpreter 113 * relinquish_interpreter
114 * 114 *
115 ******************************************************************************/ 115 ******************************************************************************/
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 22cfcfbd9fff..491191e6cf69 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
149 149
150 /* 150 /*
151 * 16-, 32-, and 64-bit cases must use the move macros that perform 151 * 16-, 32-, and 64-bit cases must use the move macros that perform
152 * endian conversion and/or accomodate hardware that cannot perform 152 * endian conversion and/or accommodate hardware that cannot perform
153 * misaligned memory transfers 153 * misaligned memory transfers
154 */ 154 */
155 case ACPI_RSC_MOVE16: 155 case ACPI_RSC_MOVE16:
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 907e350f1c7d..fca34ccfd294 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG
34 depends on ACPI_APEI 34 depends on ACPI_APEI
35 help 35 help
36 ERST is a way provided by APEI to save and retrieve hardware 36 ERST is a way provided by APEI to save and retrieve hardware
37 error infomation to and from a persistent store. Enable this 37 error information to and from a persistent store. Enable this
38 if you want to debugging and testing the ERST kernel support 38 if you want to debugging and testing the ERST kernel support
39 and firmware implementation. 39 and firmware implementation.
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 73fd0c7487c1..4a904a4bf05f 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub);
445int apei_resources_request(struct apei_resources *resources, 445int apei_resources_request(struct apei_resources *resources,
446 const char *desc) 446 const char *desc)
447{ 447{
448 struct apei_res *res, *res_bak; 448 struct apei_res *res, *res_bak = NULL;
449 struct resource *r; 449 struct resource *r;
450 int rc;
450 451
451 apei_resources_sub(resources, &apei_resources_all); 452 rc = apei_resources_sub(resources, &apei_resources_all);
453 if (rc)
454 return rc;
452 455
456 rc = -EINVAL;
453 list_for_each_entry(res, &resources->iomem, list) { 457 list_for_each_entry(res, &resources->iomem, list) {
454 r = request_mem_region(res->start, res->end - res->start, 458 r = request_mem_region(res->start, res->end - res->start,
455 desc); 459 desc);
@@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources,
475 } 479 }
476 } 480 }
477 481
478 apei_resources_merge(&apei_resources_all, resources); 482 rc = apei_resources_merge(&apei_resources_all, resources);
483 if (rc) {
484 pr_err(APEI_PFX "Fail to merge resources!\n");
485 goto err_unmap_ioport;
486 }
479 487
480 return 0; 488 return 0;
481err_unmap_ioport: 489err_unmap_ioport:
@@ -491,12 +499,13 @@ err_unmap_iomem:
491 break; 499 break;
492 release_mem_region(res->start, res->end - res->start); 500 release_mem_region(res->start, res->end - res->start);
493 } 501 }
494 return -EINVAL; 502 return rc;
495} 503}
496EXPORT_SYMBOL_GPL(apei_resources_request); 504EXPORT_SYMBOL_GPL(apei_resources_request);
497 505
498void apei_resources_release(struct apei_resources *resources) 506void apei_resources_release(struct apei_resources *resources)
499{ 507{
508 int rc;
500 struct apei_res *res; 509 struct apei_res *res;
501 510
502 list_for_each_entry(res, &resources->iomem, list) 511 list_for_each_entry(res, &resources->iomem, list)
@@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources)
504 list_for_each_entry(res, &resources->ioport, list) 513 list_for_each_entry(res, &resources->ioport, list)
505 release_region(res->start, res->end - res->start); 514 release_region(res->start, res->end - res->start);
506 515
507 apei_resources_sub(&apei_resources_all, resources); 516 rc = apei_resources_sub(&apei_resources_all, resources);
517 if (rc)
518 pr_err(APEI_PFX "Fail to sub resources!\n");
508} 519}
509EXPORT_SYMBOL_GPL(apei_resources_release); 520EXPORT_SYMBOL_GPL(apei_resources_release);
510 521
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 465c885938ee..cf29df69380b 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
426 426
427static int einj_check_table(struct acpi_table_einj *einj_tab) 427static int einj_check_table(struct acpi_table_einj *einj_tab)
428{ 428{
429 if (einj_tab->header_length != sizeof(struct acpi_table_einj)) 429 if ((einj_tab->header_length !=
430 (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
431 && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
430 return -EINVAL; 432 return -EINVAL;
431 if (einj_tab->header.length < sizeof(struct acpi_table_einj)) 433 if (einj_tab->header.length < sizeof(struct acpi_table_einj))
432 return -EINVAL; 434 return -EINVAL;
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 5281ddda2777..da1228a9a544 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -2,7 +2,7 @@
2 * APEI Error Record Serialization Table debug support 2 * APEI Error Record Serialization Table debug support
3 * 3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error 4 * ERST is a way provided by APEI to save and retrieve hardware error
5 * infomation to and from a persistent store. This file provide the 5 * information to and from a persistent store. This file provide the
6 * debugging/testing support for ERST kernel support and firmware 6 * debugging/testing support for ERST kernel support and firmware
7 * implementation. 7 * implementation.
8 * 8 *
@@ -111,11 +111,13 @@ retry:
111 goto out; 111 goto out;
112 } 112 }
113 if (len > erst_dbg_buf_len) { 113 if (len > erst_dbg_buf_len) {
114 kfree(erst_dbg_buf); 114 void *p;
115 rc = -ENOMEM; 115 rc = -ENOMEM;
116 erst_dbg_buf = kmalloc(len, GFP_KERNEL); 116 p = kmalloc(len, GFP_KERNEL);
117 if (!erst_dbg_buf) 117 if (!p)
118 goto out; 118 goto out;
119 kfree(erst_dbg_buf);
120 erst_dbg_buf = p;
119 erst_dbg_buf_len = len; 121 erst_dbg_buf_len = len;
120 goto retry; 122 goto retry;
121 } 123 }
@@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
150 if (mutex_lock_interruptible(&erst_dbg_mutex)) 152 if (mutex_lock_interruptible(&erst_dbg_mutex))
151 return -EINTR; 153 return -EINTR;
152 if (usize > erst_dbg_buf_len) { 154 if (usize > erst_dbg_buf_len) {
153 kfree(erst_dbg_buf); 155 void *p;
154 rc = -ENOMEM; 156 rc = -ENOMEM;
155 erst_dbg_buf = kmalloc(usize, GFP_KERNEL); 157 p = kmalloc(usize, GFP_KERNEL);
156 if (!erst_dbg_buf) 158 if (!p)
157 goto out; 159 goto out;
160 kfree(erst_dbg_buf);
161 erst_dbg_buf = p;
158 erst_dbg_buf_len = usize; 162 erst_dbg_buf_len = usize;
159 } 163 }
160 rc = copy_from_user(erst_dbg_buf, ubuf, usize); 164 rc = copy_from_user(erst_dbg_buf, ubuf, usize);
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 18645f4e83cd..1211c03149e8 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -2,7 +2,7 @@
2 * APEI Error Record Serialization Table support 2 * APEI Error Record Serialization Table support
3 * 3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error 4 * ERST is a way provided by APEI to save and retrieve hardware error
5 * infomation to and from a persistent store. 5 * information to and from a persistent store.
6 * 6 *
7 * For more information about ERST, please refer to ACPI Specification 7 * For more information about ERST, please refer to ACPI Specification
8 * version 4.0, section 17.4. 8 * version 4.0, section 17.4.
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
266{ 266{
267 int rc; 267 int rc;
268 u64 offset; 268 u64 offset;
269 void *src, *dst;
270
271 /* ioremap does not work in interrupt context */
272 if (in_interrupt()) {
273 pr_warning(ERST_PFX
274 "MOVE_DATA can not be used in interrupt context");
275 return -EBUSY;
276 }
269 277
270 rc = __apei_exec_read_register(entry, &offset); 278 rc = __apei_exec_read_register(entry, &offset);
271 if (rc) 279 if (rc)
272 return rc; 280 return rc;
273 memmove((void *)ctx->dst_base + offset, 281
274 (void *)ctx->src_base + offset, 282 src = ioremap(ctx->src_base + offset, ctx->var2);
275 ctx->var2); 283 if (!src)
284 return -ENOMEM;
285 dst = ioremap(ctx->dst_base + offset, ctx->var2);
286 if (!dst)
287 return -ENOMEM;
288
289 memmove(dst, src, ctx->var2);
290
291 iounmap(src);
292 iounmap(dst);
276 293
277 return 0; 294 return 0;
278} 295}
@@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable);
750 767
751static int erst_check_table(struct acpi_table_erst *erst_tab) 768static int erst_check_table(struct acpi_table_erst *erst_tab)
752{ 769{
753 if (erst_tab->header_length != sizeof(struct acpi_table_erst)) 770 if ((erst_tab->header_length !=
771 (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header)))
772 && (erst_tab->header_length != sizeof(struct acpi_table_einj)))
754 return -EINVAL; 773 return -EINVAL;
755 if (erst_tab->header.length < sizeof(struct acpi_table_erst)) 774 if (erst_tab->header.length < sizeof(struct acpi_table_erst))
756 return -EINVAL; 775 return -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 385a6059714a..0d505e59214d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
302 struct ghes *ghes = NULL; 302 struct ghes *ghes = NULL;
303 int rc = -EINVAL; 303 int rc = -EINVAL;
304 304
305 generic = ghes_dev->dev.platform_data; 305 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
306 if (!generic->enabled) 306 if (!generic->enabled)
307 return -ENODEV; 307 return -ENODEV;
308 308
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 343168d18266..1a3508a7fe03 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data)
137 137
138static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) 138static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data)
139{ 139{
140 struct acpi_hest_generic *generic;
141 struct platform_device *ghes_dev; 140 struct platform_device *ghes_dev;
142 struct ghes_arr *ghes_arr = data; 141 struct ghes_arr *ghes_arr = data;
143 int rc; 142 int rc;
144 143
145 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) 144 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
146 return 0; 145 return 0;
147 generic = (struct acpi_hest_generic *)hest_hdr; 146
148 if (!generic->enabled) 147 if (!((struct acpi_hest_generic *)hest_hdr)->enabled)
149 return 0; 148 return 0;
150 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); 149 ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id);
151 if (!ghes_dev) 150 if (!ghes_dev)
152 return -ENOMEM; 151 return -ENOMEM;
153 ghes_dev->dev.platform_data = generic; 152
153 rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *));
154 if (rc)
155 goto err;
156
154 rc = platform_device_add(ghes_dev); 157 rc = platform_device_add(ghes_dev);
155 if (rc) 158 if (rc)
156 goto err; 159 goto err;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 8f8bd736d4ff..542e53903891 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
142 list_add_tail_rcu(&map->list, &acpi_iomaps); 142 list_add_tail_rcu(&map->list, &acpi_iomaps);
143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 143 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
144 144
145 return vaddr + (paddr - pg_off); 145 return map->vaddr + (paddr - map->paddr);
146err_unmap: 146err_unmap:
147 iounmap(vaddr); 147 iounmap(vaddr);
148 return NULL; 148 return NULL;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dc58402b0a17..98417201e9ce 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = {
273 POWER_SUPPLY_PROP_CYCLE_COUNT, 273 POWER_SUPPLY_PROP_CYCLE_COUNT,
274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 274 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
275 POWER_SUPPLY_PROP_VOLTAGE_NOW, 275 POWER_SUPPLY_PROP_VOLTAGE_NOW,
276 POWER_SUPPLY_PROP_CURRENT_NOW,
277 POWER_SUPPLY_PROP_POWER_NOW, 276 POWER_SUPPLY_PROP_POWER_NOW,
278 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 277 POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
279 POWER_SUPPLY_PROP_ENERGY_FULL, 278 POWER_SUPPLY_PROP_ENERGY_FULL,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 2bb28b9d91c4..f7619600270a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
183{ 183{
184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 184 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
185 acpi_osi_setup("!Windows 2006"); 185 acpi_osi_setup("!Windows 2006");
186 acpi_osi_setup("!Windows 2006 SP1");
187 acpi_osi_setup("!Windows 2006 SP2");
186 return 0; 188 return 0;
187} 189}
188static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) 190static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
@@ -226,6 +228,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
226 }, 228 },
227 }, 229 },
228 { 230 {
231 .callback = dmi_disable_osi_vista,
232 .ident = "Toshiba Satellite L355",
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
235 DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
236 },
237 },
238 {
229 .callback = dmi_disable_osi_win7, 239 .callback = dmi_disable_osi_win7,
230 .ident = "ASUS K50IJ", 240 .ident = "ASUS K50IJ",
231 .matches = { 241 .matches = {
@@ -233,6 +243,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
233 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), 243 DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
234 }, 244 },
235 }, 245 },
246 {
247 .callback = dmi_disable_osi_vista,
248 .ident = "Toshiba P305D",
249 .matches = {
250 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
251 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
252 },
253 },
236 254
237 /* 255 /*
238 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 256 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 5c221ab535d5..310e3b9749cb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir);
55static int set_power_nocheck(const struct dmi_system_id *id) 55static int set_power_nocheck(const struct dmi_system_id *id)
56{ 56{
57 printk(KERN_NOTICE PREFIX "%s detected - " 57 printk(KERN_NOTICE PREFIX "%s detected - "
58 "disable power check in power transistion\n", id->ident); 58 "disable power check in power transition\n", id->ident);
59 acpi_power_nocheck = 1; 59 acpi_power_nocheck = 1;
60 return 0; 60 return 0;
61} 61}
@@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
80 80
81static struct dmi_system_id dsdt_dmi_table[] __initdata = { 81static struct dmi_system_id dsdt_dmi_table[] __initdata = {
82 /* 82 /*
83 * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. 83 * Invoke DSDT corruption work-around on all Toshiba Satellite.
84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679 84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
85 */ 85 */
86 { 86 {
87 .callback = set_copy_dsdt, 87 .callback = set_copy_dsdt,
88 .ident = "TOSHIBA Satellite A505", 88 .ident = "TOSHIBA Satellite",
89 .matches = { 89 .matches = {
90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), 90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
91 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), 91 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"),
92 },
93 },
94 {
95 .callback = set_copy_dsdt,
96 .ident = "TOSHIBA Satellite L505D",
97 .matches = {
98 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
99 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
100 }, 92 },
101 }, 93 },
102 {} 94 {}
@@ -1027,7 +1019,7 @@ static int __init acpi_init(void)
1027 1019
1028 /* 1020 /*
1029 * If the laptop falls into the DMI check table, the power state check 1021 * If the laptop falls into the DMI check table, the power state check
1030 * will be disabled in the course of device power transistion. 1022 * will be disabled in the course of device power transition.
1031 */ 1023 */
1032 dmi_check_system(power_nocheck_dmi_table); 1024 dmi_check_system(power_nocheck_dmi_table);
1033 1025
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8a3b840c0bb2..d94d2953c974 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void)
369 369
370 acpi_bus_unregister_driver(&acpi_fan_driver); 370 acpi_bus_unregister_driver(&acpi_fan_driver);
371 371
372#ifdef CONFIG_ACPI_PROCFS
372 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); 373 remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir);
374#endif
373 375
374 return; 376 return;
375} 377}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e9699aaed109..b618f888d66b 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id)
29 29
30static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 30static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
31 { 31 {
32 set_no_mwait, "IFL91 board", {
33 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
34 DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
35 DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
36 DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
37 {
38 set_no_mwait, "Extensa 5220", { 32 set_no_mwait, "Extensa 5220", {
39 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 33 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
40 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 34 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 156021892389..347eb21b2353 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -850,7 +850,7 @@ static int __init acpi_processor_init(void)
850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 850 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
851 acpi_idle_driver.name); 851 acpi_idle_driver.name);
852 } else { 852 } else {
853 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", 853 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
854 cpuidle_get_driver()->name); 854 cpuidle_get_driver()->name);
855 } 855 }
856 856
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index ba1bd263d903..3a73a93596e8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module)
447 if (!try_module_get(calling_module)) 447 if (!try_module_get(calling_module))
448 return -EINVAL; 448 return -EINVAL;
449 449
450 /* is_done is set to negative if an error occured, 450 /* is_done is set to negative if an error occurred,
451 * and to postitive if _no_ error occured, but SMM 451 * and to postitive if _no_ error occurred, but SMM
452 * was already notified. This avoids double notification 452 * was already notified. This avoids double notification
453 * which might lead to unexpected results... 453 * which might lead to unexpected results...
454 */ 454 */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index cf82989ae756..4754ff6e70e6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
363 return 0; 363 return 0;
364} 364}
365 365
366static int __init init_nvs_nosave(const struct dmi_system_id *d)
367{
368 acpi_nvs_nosave();
369 return 0;
370}
371
366static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 372static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
367 { 373 {
368 .callback = init_old_suspend_ordering, 374 .callback = init_old_suspend_ordering,
@@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
397 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 403 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
398 }, 404 },
399 }, 405 },
406 {
407 .callback = init_nvs_nosave,
408 .ident = "Sony Vaio VGN-SR11M",
409 .matches = {
410 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
411 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
412 },
413 },
414 {
415 .callback = init_nvs_nosave,
416 .ident = "Everex StepNote Series",
417 .matches = {
418 DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
419 DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
420 },
421 },
400 {}, 422 {},
401}; 423};
402#endif /* CONFIG_SUSPEND */ 424#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 68e2e4582fa2..f8588f81048a 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = {
100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS), 100 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
101}; 101};
102 102
103static int param_get_debug_layer(char *buffer, struct kernel_param *kp) 103static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
104{ 104{
105 int result = 0; 105 int result = 0;
106 int i; 106 int i;
@@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp)
128 return result; 128 return result;
129} 129}
130 130
131static int param_get_debug_level(char *buffer, struct kernel_param *kp) 131static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
132{ 132{
133 int result = 0; 133 int result = 0;
134 int i; 134 int i;
@@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp)
149 return result; 149 return result;
150} 150}
151 151
152module_param_call(debug_layer, param_set_uint, param_get_debug_layer, 152static struct kernel_param_ops param_ops_debug_layer = {
153 &acpi_dbg_layer, 0644); 153 .set = param_set_uint,
154module_param_call(debug_level, param_set_uint, param_get_debug_level, 154 .get = param_get_debug_layer,
155 &acpi_dbg_level, 0644); 155};
156
157static struct kernel_param_ops param_ops_debug_level = {
158 .set = param_set_uint,
159 .get = param_get_debug_level,
160};
161
162module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
163module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
156 164
157static char trace_method_name[6]; 165static char trace_method_name[6];
158module_param_string(trace_method_name, trace_method_name, 6, 0644); 166module_param_string(trace_method_name, trace_method_name, 6, 0644);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c5fef01b3c95..b83676126598 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
59 "support\n")); 59 "support\n"));
60 *cap |= ACPI_VIDEO_BACKLIGHT; 60 *cap |= ACPI_VIDEO_BACKLIGHT;
61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) 61 if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
62 printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " 62 printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
63 "control misses _BQC function\n"); 63 "cannot determine initial brightness\n");
64 /* We have backlight support, no need to scan further */ 64 /* We have backlight support, no need to scan further */
65 return AE_CTRL_TERMINATE; 65 return AE_CTRL_TERMINATE;
66 } 66 }
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 013727b20417..99d0e5a51148 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
90static int ahci_pci_device_resume(struct pci_dev *pdev); 90static int ahci_pci_device_resume(struct pci_dev *pdev);
91#endif 91#endif
92 92
93static struct scsi_host_template ahci_sht = {
94 AHCI_SHT("ahci"),
95};
96
93static struct ata_port_operations ahci_vt8251_ops = { 97static struct ata_port_operations ahci_vt8251_ops = {
94 .inherits = &ahci_ops, 98 .inherits = &ahci_ops,
95 .hardreset = ahci_vt8251_hardreset, 99 .hardreset = ahci_vt8251_hardreset,
@@ -253,6 +257,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
253 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ 257 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
254 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ 258 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
255 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ 259 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
260 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
261 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
256 263
257 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 264 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
258 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 265 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 474427b6f99f..e5fdeebf9ef0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -298,7 +298,17 @@ struct ahci_host_priv {
298 298
299extern int ahci_ignore_sss; 299extern int ahci_ignore_sss;
300 300
301extern struct scsi_host_template ahci_sht; 301extern struct device_attribute *ahci_shost_attrs[];
302extern struct device_attribute *ahci_sdev_attrs[];
303
304#define AHCI_SHT(drv_name) \
305 ATA_NCQ_SHT(drv_name), \
306 .can_queue = AHCI_MAX_CMDS - 1, \
307 .sg_tablesize = AHCI_MAX_SG, \
308 .dma_boundary = AHCI_DMA_BOUNDARY, \
309 .shost_attrs = ahci_shost_attrs, \
310 .sdev_attrs = ahci_sdev_attrs
311
302extern struct ata_port_operations ahci_ops; 312extern struct ata_port_operations ahci_ops;
303 313
304void ahci_save_initial_config(struct device *dev, 314void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33cca44..84b643270e7a 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
23#include <linux/ahci_platform.h> 23#include <linux/ahci_platform.h>
24#include "ahci.h" 24#include "ahci.h"
25 25
26static struct scsi_host_template ahci_platform_sht = {
27 AHCI_SHT("ahci_platform"),
28};
29
26static int __init ahci_probe(struct platform_device *pdev) 30static int __init ahci_probe(struct platform_device *pdev)
27{ 31{
28 struct device *dev = &pdev->dev; 32 struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
145 ahci_print_info(host, "platform"); 149 ahci_print_info(host, "platform");
146 150
147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 151 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
148 &ahci_sht); 152 &ahci_platform_sht);
149 if (rc) 153 if (rc)
150 goto err0; 154 goto err0;
151 155
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3971bc0a4838..d712675d0a96 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -302,6 +302,10 @@ static const struct pci_device_id piix_pci_tbl[] = {
302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 302 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
303 /* SATA Controller IDE (CPT) */ 303 /* SATA Controller IDE (CPT) */
304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 304 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 /* SATA Controller IDE (PBG) */
306 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
307 /* SATA Controller IDE (PBG) */
308 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
305 { } /* terminate list */ 309 { } /* terminate list */
306}; 310};
307 311
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 666850d31df2..8eea309ea212 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 122 ahci_read_em_buffer, ahci_store_em_buffer);
123 123
124static struct device_attribute *ahci_shost_attrs[] = { 124struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type, 126 &dev_attr_em_message_type,
127 &dev_attr_em_message, 127 &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
132 &dev_attr_em_buffer, 132 &dev_attr_em_buffer,
133 NULL 133 NULL
134}; 134};
135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
135 136
136static struct device_attribute *ahci_sdev_attrs[] = { 137struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity, 138 &dev_attr_sw_activity,
138 &dev_attr_unload_heads, 139 &dev_attr_unload_heads,
139 NULL 140 NULL
140}; 141};
141 142EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
142struct scsi_host_template ahci_sht = {
143 ATA_NCQ_SHT("ahci"),
144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs,
149};
150EXPORT_SYMBOL_GPL(ahci_sht);
151 143
152struct ata_port_operations ahci_ops = { 144struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops, 145 .inherits = &sata_pmp_port_ops,
@@ -1326,7 +1318,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1326 /* issue the first D2H Register FIS */ 1318 /* issue the first D2H Register FIS */
1327 msecs = 0; 1319 msecs = 0;
1328 now = jiffies; 1320 now = jiffies;
1329 if (time_after(now, deadline)) 1321 if (time_after(deadline, now))
1330 msecs = jiffies_to_msecs(deadline - now); 1322 msecs = jiffies_to_msecs(deadline - now);
1331 1323
1332 tf.ctl |= ATA_SRST; 1324 tf.ctl |= ATA_SRST;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c035b3d041ee..932eaee50245 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5418,6 +5418,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5418 */ 5418 */
5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5419int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5420{ 5420{
5421 unsigned int ehi_flags = ATA_EHI_QUIET;
5421 int rc; 5422 int rc;
5422 5423
5423 /* 5424 /*
@@ -5426,7 +5427,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5426 */ 5427 */
5427 ata_lpm_enable(host); 5428 ata_lpm_enable(host);
5428 5429
5429 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5430 /*
5431 * On some hardware, device fails to respond after spun down
5432 * for suspend. As the device won't be used before being
5433 * resumed, we don't need to touch the device. Ask EH to skip
5434 * the usual stuff and proceed directly to suspend.
5435 *
5436 * http://thread.gmane.org/gmane.linux.ide/46764
5437 */
5438 if (mesg.event == PM_EVENT_SUSPEND)
5439 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
5440
5441 rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
5430 if (rc == 0) 5442 if (rc == 0)
5431 host->dev->power.power_state = mesg; 5443 host->dev->power.power_state = mesg;
5432 return rc; 5444 return rc;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299b8342..e48302eae55f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3235,6 +3235,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
3235 if (link->flags & ATA_LFLAG_DISABLED) 3235 if (link->flags & ATA_LFLAG_DISABLED)
3236 return 1; 3236 return 1;
3237 3237
3238 /* skip if explicitly requested */
3239 if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3240 return 1;
3241
3238 /* thaw frozen port and recover failed devices */ 3242 /* thaw frozen port and recover failed devices */
3239 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) 3243 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3240 return 0; 3244 return 0;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3b82d8ef76f0..e30c537cce32 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
418 if (ioaddr->ctl_addr) 418 if (ioaddr->ctl_addr)
419 iowrite8(tf->ctl, ioaddr->ctl_addr); 419 iowrite8(tf->ctl, ioaddr->ctl_addr);
420 ap->last_ctl = tf->ctl; 420 ap->last_ctl = tf->ctl;
421 ata_wait_idle(ap);
421 } 422 }
422 423
423 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 424 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
@@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
453 iowrite8(tf->device, ioaddr->device_addr); 454 iowrite8(tf->device, ioaddr->device_addr);
454 VPRINTK("device 0x%X\n", tf->device); 455 VPRINTK("device 0x%X\n", tf->device);
455 } 456 }
457
458 ata_wait_idle(ap);
456} 459}
457EXPORT_SYMBOL_GPL(ata_sff_tf_load); 460EXPORT_SYMBOL_GPL(ata_sff_tf_load);
458 461
@@ -1042,7 +1045,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1042int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 1045int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1043 u8 status, int in_wq) 1046 u8 status, int in_wq)
1044{ 1047{
1045 struct ata_eh_info *ehi = &ap->link.eh_info; 1048 struct ata_link *link = qc->dev->link;
1049 struct ata_eh_info *ehi = &link->eh_info;
1046 unsigned long flags = 0; 1050 unsigned long flags = 0;
1047 int poll_next; 1051 int poll_next;
1048 1052
@@ -1298,8 +1302,14 @@ fsm_start:
1298} 1302}
1299EXPORT_SYMBOL_GPL(ata_sff_hsm_move); 1303EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
1300 1304
1301void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay) 1305void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
1302{ 1306{
1307 struct ata_port *ap = link->ap;
1308
1309 WARN_ON((ap->sff_pio_task_link != NULL) &&
1310 (ap->sff_pio_task_link != link));
1311 ap->sff_pio_task_link = link;
1312
1303 /* may fail if ata_sff_flush_pio_task() in progress */ 1313 /* may fail if ata_sff_flush_pio_task() in progress */
1304 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task, 1314 queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
1305 msecs_to_jiffies(delay)); 1315 msecs_to_jiffies(delay));
@@ -1321,14 +1331,18 @@ static void ata_sff_pio_task(struct work_struct *work)
1321{ 1331{
1322 struct ata_port *ap = 1332 struct ata_port *ap =
1323 container_of(work, struct ata_port, sff_pio_task.work); 1333 container_of(work, struct ata_port, sff_pio_task.work);
1334 struct ata_link *link = ap->sff_pio_task_link;
1324 struct ata_queued_cmd *qc; 1335 struct ata_queued_cmd *qc;
1325 u8 status; 1336 u8 status;
1326 int poll_next; 1337 int poll_next;
1327 1338
1339 BUG_ON(ap->sff_pio_task_link == NULL);
1328 /* qc can be NULL if timeout occurred */ 1340 /* qc can be NULL if timeout occurred */
1329 qc = ata_qc_from_tag(ap, ap->link.active_tag); 1341 qc = ata_qc_from_tag(ap, link->active_tag);
1330 if (!qc) 1342 if (!qc) {
1343 ap->sff_pio_task_link = NULL;
1331 return; 1344 return;
1345 }
1332 1346
1333fsm_start: 1347fsm_start:
1334 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE); 1348 WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
@@ -1345,11 +1359,16 @@ fsm_start:
1345 msleep(2); 1359 msleep(2);
1346 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1360 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1347 if (status & ATA_BUSY) { 1361 if (status & ATA_BUSY) {
1348 ata_sff_queue_pio_task(ap, ATA_SHORT_PAUSE); 1362 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1349 return; 1363 return;
1350 } 1364 }
1351 } 1365 }
1352 1366
1367 /*
1368 * hsm_move() may trigger another command to be processed.
1369 * clean the link beforehand.
1370 */
1371 ap->sff_pio_task_link = NULL;
1353 /* move the HSM */ 1372 /* move the HSM */
1354 poll_next = ata_sff_hsm_move(ap, qc, status, 1); 1373 poll_next = ata_sff_hsm_move(ap, qc, status, 1);
1355 1374
@@ -1376,6 +1395,7 @@ fsm_start:
1376unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) 1395unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1377{ 1396{
1378 struct ata_port *ap = qc->ap; 1397 struct ata_port *ap = qc->ap;
1398 struct ata_link *link = qc->dev->link;
1379 1399
1380 /* Use polling pio if the LLD doesn't handle 1400 /* Use polling pio if the LLD doesn't handle
1381 * interrupt driven pio and atapi CDB interrupt. 1401 * interrupt driven pio and atapi CDB interrupt.
@@ -1396,7 +1416,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1396 ap->hsm_task_state = HSM_ST_LAST; 1416 ap->hsm_task_state = HSM_ST_LAST;
1397 1417
1398 if (qc->tf.flags & ATA_TFLAG_POLLING) 1418 if (qc->tf.flags & ATA_TFLAG_POLLING)
1399 ata_sff_queue_pio_task(ap, 0); 1419 ata_sff_queue_pio_task(link, 0);
1400 1420
1401 break; 1421 break;
1402 1422
@@ -1409,7 +1429,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1409 if (qc->tf.flags & ATA_TFLAG_WRITE) { 1429 if (qc->tf.flags & ATA_TFLAG_WRITE) {
1410 /* PIO data out protocol */ 1430 /* PIO data out protocol */
1411 ap->hsm_task_state = HSM_ST_FIRST; 1431 ap->hsm_task_state = HSM_ST_FIRST;
1412 ata_sff_queue_pio_task(ap, 0); 1432 ata_sff_queue_pio_task(link, 0);
1413 1433
1414 /* always send first data block using the 1434 /* always send first data block using the
1415 * ata_sff_pio_task() codepath. 1435 * ata_sff_pio_task() codepath.
@@ -1419,7 +1439,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1419 ap->hsm_task_state = HSM_ST; 1439 ap->hsm_task_state = HSM_ST;
1420 1440
1421 if (qc->tf.flags & ATA_TFLAG_POLLING) 1441 if (qc->tf.flags & ATA_TFLAG_POLLING)
1422 ata_sff_queue_pio_task(ap, 0); 1442 ata_sff_queue_pio_task(link, 0);
1423 1443
1424 /* if polling, ata_sff_pio_task() handles the 1444 /* if polling, ata_sff_pio_task() handles the
1425 * rest. otherwise, interrupt handler takes 1445 * rest. otherwise, interrupt handler takes
@@ -1441,7 +1461,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
1441 /* send cdb by polling if no cdb interrupt */ 1461 /* send cdb by polling if no cdb interrupt */
1442 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 1462 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
1443 (qc->tf.flags & ATA_TFLAG_POLLING)) 1463 (qc->tf.flags & ATA_TFLAG_POLLING))
1444 ata_sff_queue_pio_task(ap, 0); 1464 ata_sff_queue_pio_task(link, 0);
1445 break; 1465 break;
1446 1466
1447 default: 1467 default:
@@ -2734,6 +2754,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
2734unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) 2754unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2735{ 2755{
2736 struct ata_port *ap = qc->ap; 2756 struct ata_port *ap = qc->ap;
2757 struct ata_link *link = qc->dev->link;
2737 2758
2738 /* defer PIO handling to sff_qc_issue */ 2759 /* defer PIO handling to sff_qc_issue */
2739 if (!ata_is_dma(qc->tf.protocol)) 2760 if (!ata_is_dma(qc->tf.protocol))
@@ -2762,7 +2783,7 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2762 2783
2763 /* send cdb by polling if no cdb interrupt */ 2784 /* send cdb by polling if no cdb interrupt */
2764 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2765 ata_sff_queue_pio_task(ap, 0); 2786 ata_sff_queue_pio_task(link, 0);
2766 break; 2787 break;
2767 2788
2768 default: 2789 default:
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index ba43f0f8c880..2215632e4b31 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -74,7 +74,8 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
74 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 74 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
75 75
76 /* Odd numbered device ids are the units with enable bits (the -R cards) */ 76 /* Odd numbered device ids are the units with enable bits (the -R cards) */
77 if (pdev->device % 1 && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) 77 if ((pdev->device & 1) &&
78 !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
78 return -ENOENT; 79 return -ENOENT;
79 80
80 return ata_sff_prereset(link, deadline); 81 return ata_sff_prereset(link, deadline);
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 5e659885de16..ac8d7d97e408 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
417 tf->lbam, 417 tf->lbam,
418 tf->lbah); 418 tf->lbah);
419 } 419 }
420
421 ata_wait_idle(ap);
420} 422}
421 423
422static int via_port_start(struct ata_port *ap) 424static int via_port_start(struct ata_port *ap)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 81982594a014..a9fd9709c262 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2284,7 +2284,7 @@ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2284 } 2284 }
2285 2285
2286 if (qc->tf.flags & ATA_TFLAG_POLLING) 2286 if (qc->tf.flags & ATA_TFLAG_POLLING)
2287 ata_sff_queue_pio_task(ap, 0); 2287 ata_sff_queue_pio_task(link, 0);
2288 return 0; 2288 return 0;
2289} 2289}
2290 2290
diff --git a/drivers/atm/Makefile b/drivers/atm/Makefile
index 62c3cc1075ae..c6c9ee9f5da2 100644
--- a/drivers/atm/Makefile
+++ b/drivers/atm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the Linux network (ATM) device drivers. 2# Makefile for the Linux network (ATM) device drivers.
3# 3#
4 4
5fore_200e-objs := fore200e.o 5fore_200e-y := fore200e.o
6 6
7obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o 7obj-$(CONFIG_ATM_ZATM) += zatm.o uPD98402.o
8obj-$(CONFIG_ATM_NICSTAR) += nicstar.o 8obj-$(CONFIG_ATM_NICSTAR) += nicstar.o
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 54720baa7363..a95790452a68 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1645,10 +1645,8 @@ static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
1645 unsigned short d = 0; 1645 unsigned short d = 0;
1646 char * s = skb->data; 1646 char * s = skb->data;
1647 if (*s++ == 'D') { 1647 if (*s++ == 'D') {
1648 for (i = 0; i < 4; ++i) { 1648 for (i = 0; i < 4; ++i)
1649 d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10)); 1649 d = (d << 4) | hex_to_bin(*s++);
1650 ++s;
1651 }
1652 PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d); 1650 PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
1653 } 1651 }
1654 } 1652 }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1679cbf0c584..bce57328ddde 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3152,7 +3152,7 @@ deinit_card(struct idt77252_dev *card)
3152} 3152}
3153 3153
3154 3154
3155static int __devinit 3155static void __devinit
3156init_sram(struct idt77252_dev *card) 3156init_sram(struct idt77252_dev *card)
3157{ 3157{
3158 int i; 3158 int i;
@@ -3298,7 +3298,6 @@ init_sram(struct idt77252_dev *card)
3298 SAR_REG_RXFD); 3298 SAR_REG_RXFD);
3299 3299
3300 IPRINTK("%s: SRAM initialization complete.\n", card->name); 3300 IPRINTK("%s: SRAM initialization complete.\n", card->name);
3301 return 0;
3302} 3301}
3303 3302
3304static int __devinit 3303static int __devinit
@@ -3410,8 +3409,7 @@ init_card(struct atm_dev *dev)
3410 3409
3411 writel(readl(SAR_REG_CFG) | conf, SAR_REG_CFG); 3410 writel(readl(SAR_REG_CFG) | conf, SAR_REG_CFG);
3412 3411
3413 if (init_sram(card) < 0) 3412 init_sram(card);
3414 return -1;
3415 3413
3416/********************************************************************/ 3414/********************************************************************/
3417/* A L L O C R A M A N D S E T V A R I O U S T H I N G S */ 3415/* A L L O C R A M A N D S E T V A R I O U S T H I N G S */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 8b358d7d958f..9309d4724e13 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3156{ 3156{
3157 struct atm_dev *dev; 3157 struct atm_dev *dev;
3158 IADEV *iadev; 3158 IADEV *iadev;
3159 unsigned long flags;
3160 int ret; 3159 int ret;
3161 3160
3162 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); 3161 iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev,
3188 ia_dev[iadev_count] = iadev; 3187 ia_dev[iadev_count] = iadev;
3189 _ia_dev[iadev_count] = dev; 3188 _ia_dev[iadev_count] = dev;
3190 iadev_count++; 3189 iadev_count++;
3191 spin_lock_init(&iadev->misc_lock);
3192 /* First fixes first. I don't want to think about this now. */
3193 spin_lock_irqsave(&iadev->misc_lock, flags);
3194 if (ia_init(dev) || ia_start(dev)) { 3190 if (ia_init(dev) || ia_start(dev)) {
3195 IF_INIT(printk("IA register failed!\n");) 3191 IF_INIT(printk("IA register failed!\n");)
3196 iadev_count--; 3192 iadev_count--;
3197 ia_dev[iadev_count] = NULL; 3193 ia_dev[iadev_count] = NULL;
3198 _ia_dev[iadev_count] = NULL; 3194 _ia_dev[iadev_count] = NULL;
3199 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3200 ret = -EINVAL; 3195 ret = -EINVAL;
3201 goto err_out_deregister_dev; 3196 goto err_out_deregister_dev;
3202 } 3197 }
3203 spin_unlock_irqrestore(&iadev->misc_lock, flags);
3204 IF_EVENT(printk("iadev_count = %d\n", iadev_count);) 3198 IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3205 3199
3206 iadev->next_board = ia_boards; 3200 iadev->next_board = ia_boards;
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index b2cd20f549cb..077735e0e04b 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -1022,7 +1022,7 @@ typedef struct iadev_t {
1022 struct dle_q rx_dle_q; 1022 struct dle_q rx_dle_q;
1023 struct free_desc_q *rx_free_desc_qhead; 1023 struct free_desc_q *rx_free_desc_qhead;
1024 struct sk_buff_head rx_dma_q; 1024 struct sk_buff_head rx_dma_q;
1025 spinlock_t rx_lock, misc_lock; 1025 spinlock_t rx_lock;
1026 struct atm_vcc **rx_open; /* list of all open VCs */ 1026 struct atm_vcc **rx_open; /* list of all open VCs */
1027 u16 num_rx_desc, rx_buf_sz, rxing; 1027 u16 num_rx_desc, rx_buf_sz, rxing;
1028 u32 rx_pkt_ram, rx_tmp_cnt; 1028 u32 rx_pkt_ram, rx_tmp_cnt;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f916ddf63938..f46138ab38b6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
444 struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); 444 struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev);
445 struct solos_card *card = atmdev->dev_data; 445 struct solos_card *card = atmdev->dev_data;
446 struct sk_buff *skb; 446 struct sk_buff *skb;
447 unsigned int len;
447 448
448 spin_lock(&card->cli_queue_lock); 449 spin_lock(&card->cli_queue_lock);
449 skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); 450 skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]);
@@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr,
451 if(skb == NULL) 452 if(skb == NULL)
452 return sprintf(buf, "No data.\n"); 453 return sprintf(buf, "No data.\n");
453 454
454 memcpy(buf, skb->data, skb->len); 455 len = skb->len;
455 dev_dbg(&card->dev->dev, "len: %d\n", skb->len); 456 memcpy(buf, skb->data, len);
457 dev_dbg(&card->dev->dev, "len: %d\n", len);
456 458
457 kfree_skb(skb); 459 kfree_skb(skb);
458 return skb->len; 460 return len;
459} 461}
460 462
461static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) 463static int send_command(struct solos_card *card, int dev, const char *buf, size_t size)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5419a49ff135..276d5a701dc3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -59,6 +59,7 @@ void device_pm_init(struct device *dev)
59{ 59{
60 dev->power.status = DPM_ON; 60 dev->power.status = DPM_ON;
61 init_completion(&dev->power.completion); 61 init_completion(&dev->power.completion);
62 complete_all(&dev->power.completion);
62 dev->power.wakeup_count = 0; 63 dev->power.wakeup_count = 0;
63 pm_runtime_init(dev); 64 pm_runtime_init(dev);
64} 65}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 31064df1370a..5e4fadcdece9 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -297,6 +297,8 @@ static void enqueue_cmd_and_start_io(ctlr_info_t *h,
297 spin_lock_irqsave(&h->lock, flags); 297 spin_lock_irqsave(&h->lock, flags);
298 addQ(&h->reqQ, c); 298 addQ(&h->reqQ, c);
299 h->Qdepth++; 299 h->Qdepth++;
300 if (h->Qdepth > h->maxQsinceinit)
301 h->maxQsinceinit = h->Qdepth;
300 start_io(h); 302 start_io(h);
301 spin_unlock_irqrestore(&h->lock, flags); 303 spin_unlock_irqrestore(&h->lock, flags);
302} 304}
@@ -4519,6 +4521,12 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4519 misc_fw_support = readl(&cfgtable->misc_fw_support); 4521 misc_fw_support = readl(&cfgtable->misc_fw_support);
4520 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 4522 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
4521 4523
4524 /* The doorbell reset seems to cause lockups on some Smart
4525 * Arrays (e.g. P410, P410i, maybe others). Until this is
4526 * fixed or at least isolated, avoid the doorbell reset.
4527 */
4528 use_doorbell = 0;
4529
4522 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4530 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4523 if (rc) 4531 if (rc)
4524 goto unmap_cfgtable; 4532 goto unmap_cfgtable;
@@ -4712,6 +4720,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4712 h->scatter_list = kmalloc(h->max_commands * 4720 h->scatter_list = kmalloc(h->max_commands *
4713 sizeof(struct scatterlist *), 4721 sizeof(struct scatterlist *),
4714 GFP_KERNEL); 4722 GFP_KERNEL);
4723 if (!h->scatter_list)
4724 goto clean4;
4725
4715 for (k = 0; k < h->nr_cmds; k++) { 4726 for (k = 0; k < h->nr_cmds; k++) {
4716 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * 4727 h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
4717 h->maxsgentries, 4728 h->maxsgentries,
@@ -4781,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4781clean4: 4792clean4:
4782 kfree(h->cmd_pool_bits); 4793 kfree(h->cmd_pool_bits);
4783 /* Free up sg elements */ 4794 /* Free up sg elements */
4784 for (k = 0; k < h->nr_cmds; k++) 4795 for (k-- ; k >= 0; k--)
4785 kfree(h->scatter_list[k]); 4796 kfree(h->scatter_list[k]);
4786 kfree(h->scatter_list); 4797 kfree(h->scatter_list);
4787 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4798 cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f3c636d23718..91797bbbe702 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -477,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
478 478
479 if (bio_rw(bio) == WRITE) { 479 if (bio_rw(bio) == WRITE) {
480 bool barrier = (bio->bi_rw & REQ_HARDBARRIER); 480 bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER);
481 struct file *file = lo->lo_backing_file; 481 struct file *file = lo->lo_backing_file;
482 482
483 if (barrier) { 483 if (barrier) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index b82c5ce5e9df..76fa3deaee84 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -974,8 +974,7 @@ static int mg_probe(struct platform_device *plat_dev)
974 host->breq->queuedata = host; 974 host->breq->queuedata = host;
975 975
976 /* mflash is random device, thanx for the noop */ 976 /* mflash is random device, thanx for the noop */
977 elevator_exit(host->breq->elevator); 977 err = elevator_change(host->breq, "noop");
978 err = elevator_init(host->breq, "noop");
979 if (err) { 978 if (err) {
980 printk(KERN_ERR "%s:%d (elevator_init) fail\n", 979 printk(KERN_ERR "%s:%d (elevator_init) fail\n",
981 __func__, __LINE__); 980 __func__, __LINE__);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb59bb76..37a2bb595076 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2369 pkt_shrink_pktlist(pd); 2369 pkt_shrink_pktlist(pd);
2370} 2370}
2371 2371
2372static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2372static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2373{ 2373{
2374 if (dev_minor >= MAX_WRITERS) 2374 if (dev_minor >= MAX_WRITERS)
2375 return NULL; 2375 return NULL;
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 0d32ec82e9bf..548d1d9e4dda 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -117,8 +117,8 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
117 (event->data[2] == MODULE_ALREADY_UP)) ? 117 (event->data[2] == MODULE_ALREADY_UP)) ?
118 "Bring-up succeed" : "Bring-up failed"); 118 "Bring-up succeed" : "Bring-up failed");
119 119
120 if (event->length > 3) 120 if (event->length > 3 && event->data[3])
121 priv->btmrvl_dev.dev_type = event->data[3]; 121 priv->btmrvl_dev.dev_type = HCI_AMP;
122 else 122 else
123 priv->btmrvl_dev.dev_type = HCI_BREDR; 123 priv->btmrvl_dev.dev_type = HCI_BREDR;
124 124
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 76e5127884f0..792e32d29a1d 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -46,6 +46,9 @@ static const struct sdio_device_id btsdio_table[] = {
46 /* Generic Bluetooth Type-B SDIO device */ 46 /* Generic Bluetooth Type-B SDIO device */
47 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, 47 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
48 48
49 /* Generic Bluetooth AMP controller */
50 { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
51
49 { } /* Terminating entry */ 52 { } /* Terminating entry */
50}; 53};
51 54
@@ -329,6 +332,11 @@ static int btsdio_probe(struct sdio_func *func,
329 hdev->bus = HCI_SDIO; 332 hdev->bus = HCI_SDIO;
330 hdev->driver_data = data; 333 hdev->driver_data = data;
331 334
335 if (id->class == SDIO_CLASS_BT_AMP)
336 hdev->dev_type = HCI_AMP;
337 else
338 hdev->dev_type = HCI_BREDR;
339
332 data->hdev = hdev; 340 data->hdev = hdev;
333 341
334 SET_HCIDEV_DEV(hdev, &func->dev); 342 SET_HCIDEV_DEV(hdev, &func->dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d22ce3cc611e..d120a5c1c093 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -59,9 +59,15 @@ static struct usb_device_id btusb_table[] = {
59 /* Generic Bluetooth USB device */ 59 /* Generic Bluetooth USB device */
60 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, 60 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
61 61
62 /* Apple MacBookPro 7,1 */
63 { USB_DEVICE(0x05ac, 0x8213) },
64
62 /* Apple iMac11,1 */ 65 /* Apple iMac11,1 */
63 { USB_DEVICE(0x05ac, 0x8215) }, 66 { USB_DEVICE(0x05ac, 0x8215) },
64 67
68 /* Apple MacBookPro6,2 */
69 { USB_DEVICE(0x05ac, 0x8218) },
70
65 /* AVM BlueFRITZ! USB v2.0 */ 71 /* AVM BlueFRITZ! USB v2.0 */
66 { USB_DEVICE(0x057c, 0x3800) }, 72 { USB_DEVICE(0x057c, 0x3800) },
67 73
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 998833d93c13..74cb6f3e86c5 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -101,7 +101,7 @@ static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
101 break; 101 break;
102 102
103 case HCI_SCODATA_PKT: 103 case HCI_SCODATA_PKT:
104 hdev->stat.cmd_tx++; 104 hdev->stat.sco_tx++;
105 break; 105 break;
106 } 106 }
107} 107}
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index eab58db5f91c..cd18493c9527 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -806,6 +806,8 @@ static const struct intel_driver_description {
806 "G45/G43", NULL, &intel_i965_driver }, 806 "G45/G43", NULL, &intel_i965_driver },
807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 807 { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
808 "B43", NULL, &intel_i965_driver }, 808 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
810 "B43", NULL, &intel_i965_driver },
809 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 811 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
810 "G41", NULL, &intel_i965_driver }, 812 "G41", NULL, &intel_i965_driver },
811 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 813 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index ee189c74d345..d09b1ab7e8ab 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -186,6 +186,8 @@
186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 186#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 187#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 188#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
189#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
190#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
189#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 191#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
190#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 192#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
191#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 193#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 3822b4f49c84..7bd7c45b53ef 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -305,6 +305,9 @@ static int num_force_kipmid;
305#ifdef CONFIG_PCI 305#ifdef CONFIG_PCI
306static int pci_registered; 306static int pci_registered;
307#endif 307#endif
308#ifdef CONFIG_ACPI
309static int pnp_registered;
310#endif
308#ifdef CONFIG_PPC_OF 311#ifdef CONFIG_PPC_OF
309static int of_registered; 312static int of_registered;
310#endif 313#endif
@@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2126{ 2129{
2127 struct acpi_device *acpi_dev; 2130 struct acpi_device *acpi_dev;
2128 struct smi_info *info; 2131 struct smi_info *info;
2129 struct resource *res; 2132 struct resource *res, *res_second;
2130 acpi_handle handle; 2133 acpi_handle handle;
2131 acpi_status status; 2134 acpi_status status;
2132 unsigned long long tmp; 2135 unsigned long long tmp;
@@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
2182 info->io.addr_data = res->start; 2185 info->io.addr_data = res->start;
2183 2186
2184 info->io.regspacing = DEFAULT_REGSPACING; 2187 info->io.regspacing = DEFAULT_REGSPACING;
2185 res = pnp_get_resource(dev, 2188 res_second = pnp_get_resource(dev,
2186 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 2189 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2187 IORESOURCE_IO : IORESOURCE_MEM, 2190 IORESOURCE_IO : IORESOURCE_MEM,
2188 1); 2191 1);
2189 if (res) { 2192 if (res_second) {
2190 if (res->start > info->io.addr_data) 2193 if (res_second->start > info->io.addr_data)
2191 info->io.regspacing = res->start - info->io.addr_data; 2194 info->io.regspacing = res_second->start - info->io.addr_data;
2192 } 2195 }
2193 info->io.regsize = DEFAULT_REGSPACING; 2196 info->io.regsize = DEFAULT_REGSPACING;
2194 info->io.regshift = 0; 2197 info->io.regshift = 0;
@@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
3359 3362
3360#ifdef CONFIG_ACPI 3363#ifdef CONFIG_ACPI
3361 pnp_register_driver(&ipmi_pnp_driver); 3364 pnp_register_driver(&ipmi_pnp_driver);
3365 pnp_registered = 1;
3362#endif 3366#endif
3363 3367
3364#ifdef CONFIG_DMI 3368#ifdef CONFIG_DMI
@@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
3526 pci_unregister_driver(&ipmi_pci_driver); 3530 pci_unregister_driver(&ipmi_pci_driver);
3527#endif 3531#endif
3528#ifdef CONFIG_ACPI 3532#ifdef CONFIG_ACPI
3529 pnp_unregister_driver(&ipmi_pnp_driver); 3533 if (pnp_registered)
3534 pnp_unregister_driver(&ipmi_pnp_driver);
3530#endif 3535#endif
3531 3536
3532#ifdef CONFIG_PPC_OF 3537#ifdef CONFIG_PPC_OF
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index a398ecdbd758..1f528fad3516 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
788/* 788/*
789 * capabilities for /dev/zero 789 * capabilities for /dev/zero
790 * - permits private mappings, "copies" are taken of the source of zeros 790 * - permits private mappings, "copies" are taken of the source of zeros
791 * - no writeback happens
791 */ 792 */
792static struct backing_dev_info zero_bdi = { 793static struct backing_dev_info zero_bdi = {
793 .name = "char/mem", 794 .name = "char/mem",
794 .capabilities = BDI_CAP_MAP_COPY, 795 .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
795}; 796};
796 797
797static const struct file_operations full_fops = { 798static const struct file_operations full_fops = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 942a9826bd23..c810481a5bc2 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
596 ssize_t ret; 596 ssize_t ret;
597 bool nonblock; 597 bool nonblock;
598 598
599 /* Userspace could be out to fool us */
600 if (!count)
601 return 0;
602
599 port = filp->private_data; 603 port = filp->private_data;
600 604
601 nonblock = filp->f_flags & O_NONBLOCK; 605 nonblock = filp->f_flags & O_NONBLOCK;
@@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
642 poll_wait(filp, &port->waitqueue, wait); 646 poll_wait(filp, &port->waitqueue, wait);
643 647
644 ret = 0; 648 ret = 0;
645 if (port->inbuf) 649 if (!will_read_block(port))
646 ret |= POLLIN | POLLRDNORM; 650 ret |= POLLIN | POLLRDNORM;
647 if (!will_write_block(port)) 651 if (!will_write_block(port))
648 ret |= POLLOUT; 652 ret |= POLLOUT;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 2bbeaaea46e9..38df8c19e74c 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -533,11 +533,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
533 case KIOCSOUND: 533 case KIOCSOUND:
534 if (!perm) 534 if (!perm)
535 goto eperm; 535 goto eperm;
536 /* FIXME: This is an old broken API but we need to keep it 536 /*
537 supported and somehow separate the historic advertised 537 * The use of PIT_TICK_RATE is historic, it used to be
538 tick rate from any real one */ 538 * the platform-dependent CLOCK_TICK_RATE between 2.6.12
539 * and 2.6.36, which was a minor but unfortunate ABI
540 * change.
541 */
539 if (arg) 542 if (arg)
540 arg = CLOCK_TICK_RATE / arg; 543 arg = PIT_TICK_RATE / arg;
541 kd_mksound(arg, 0); 544 kd_mksound(arg, 0);
542 break; 545 break;
543 546
@@ -553,11 +556,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
553 */ 556 */
554 ticks = HZ * ((arg >> 16) & 0xffff) / 1000; 557 ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
555 count = ticks ? (arg & 0xffff) : 0; 558 count = ticks ? (arg & 0xffff) : 0;
556 /* FIXME: This is an old broken API but we need to keep it
557 supported and somehow separate the historic advertised
558 tick rate from any real one */
559 if (count) 559 if (count)
560 count = CLOCK_TICK_RATE / count; 560 count = PIT_TICK_RATE / count;
561 kd_mksound(count, ticks); 561 kd_mksound(count, ticks);
562 break; 562 break;
563 } 563 }
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c2408bbe9c2e..f508690eb958 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -80,7 +80,7 @@
80 * Limiting Performance Impact 80 * Limiting Performance Impact
81 * --------------------------- 81 * ---------------------------
82 * C states, especially those with large exit latencies, can have a real 82 * C states, especially those with large exit latencies, can have a real
83 * noticable impact on workloads, which is not acceptable for most sysadmins, 83 * noticeable impact on workloads, which is not acceptable for most sysadmins,
84 * and in addition, less performance has a power price of its own. 84 * and in addition, less performance has a power price of its own.
85 * 85 *
86 * As a general rule of thumb, menu assumes that the following heuristic 86 * As a general rule of thumb, menu assumes that the following heuristic
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 8661c84a105d..b98c67664ae7 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
39 39
40static LIST_HEAD(dca_domains); 40static LIST_HEAD(dca_domains);
41 41
42static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
43
44static int dca_providers_blocked;
45
42static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) 46static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
43{ 47{
44 struct pci_dev *pdev = to_pci_dev(dev); 48 struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
70 kfree(domain); 74 kfree(domain);
71} 75}
72 76
77static int dca_provider_ioat_ver_3_0(struct device *dev)
78{
79 struct pci_dev *pdev = to_pci_dev(dev);
80
81 return ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
82 ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) ||
83 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) ||
84 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) ||
85 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) ||
86 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) ||
87 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) ||
88 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) ||
89 (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7)));
90}
91
92static void unregister_dca_providers(void)
93{
94 struct dca_provider *dca, *_dca;
95 struct list_head unregistered_providers;
96 struct dca_domain *domain;
97 unsigned long flags;
98
99 blocking_notifier_call_chain(&dca_provider_chain,
100 DCA_PROVIDER_REMOVE, NULL);
101
102 INIT_LIST_HEAD(&unregistered_providers);
103
104 spin_lock_irqsave(&dca_lock, flags);
105
106 if (list_empty(&dca_domains)) {
107 spin_unlock_irqrestore(&dca_lock, flags);
108 return;
109 }
110
111 /* at this point only one domain in the list is expected */
112 domain = list_first_entry(&dca_domains, struct dca_domain, node);
113 if (!domain)
114 return;
115
116 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
117 list_del(&dca->node);
118 list_add(&dca->node, &unregistered_providers);
119 }
120
121 dca_free_domain(domain);
122
123 spin_unlock_irqrestore(&dca_lock, flags);
124
125 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
126 dca_sysfs_remove_provider(dca);
127 list_del(&dca->node);
128 }
129}
130
73static struct dca_domain *dca_find_domain(struct pci_bus *rc) 131static struct dca_domain *dca_find_domain(struct pci_bus *rc)
74{ 132{
75 struct dca_domain *domain; 133 struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
90 domain = dca_find_domain(rc); 148 domain = dca_find_domain(rc);
91 149
92 if (!domain) { 150 if (!domain) {
93 domain = dca_allocate_domain(rc); 151 if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
94 if (domain) 152 dca_providers_blocked = 1;
95 list_add(&domain->node, &dca_domains); 153 } else {
154 domain = dca_allocate_domain(rc);
155 if (domain)
156 list_add(&domain->node, &dca_domains);
157 }
96 } 158 }
97 159
98 return domain; 160 return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
293} 355}
294EXPORT_SYMBOL_GPL(free_dca_provider); 356EXPORT_SYMBOL_GPL(free_dca_provider);
295 357
296static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
297
298/** 358/**
299 * register_dca_provider - register a dca provider 359 * register_dca_provider - register a dca provider
300 * @dca - struct created by alloc_dca_provider() 360 * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
306 unsigned long flags; 366 unsigned long flags;
307 struct dca_domain *domain; 367 struct dca_domain *domain;
308 368
369 spin_lock_irqsave(&dca_lock, flags);
370 if (dca_providers_blocked) {
371 spin_unlock_irqrestore(&dca_lock, flags);
372 return -ENODEV;
373 }
374 spin_unlock_irqrestore(&dca_lock, flags);
375
309 err = dca_sysfs_add_provider(dca, dev); 376 err = dca_sysfs_add_provider(dca, dev);
310 if (err) 377 if (err)
311 return err; 378 return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
313 spin_lock_irqsave(&dca_lock, flags); 380 spin_lock_irqsave(&dca_lock, flags);
314 domain = dca_get_domain(dev); 381 domain = dca_get_domain(dev);
315 if (!domain) { 382 if (!domain) {
316 spin_unlock_irqrestore(&dca_lock, flags); 383 if (dca_providers_blocked) {
384 spin_unlock_irqrestore(&dca_lock, flags);
385 dca_sysfs_remove_provider(dca);
386 unregister_dca_providers();
387 } else {
388 spin_unlock_irqrestore(&dca_lock, flags);
389 }
317 return -ENODEV; 390 return -ENODEV;
318 } 391 }
319 list_add(&dca->node, &domain->dca_providers); 392 list_add(&dca->node, &domain->dca_providers);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9fde34..411d5bf50fc4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
162 162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{ 164{
165 u32 val = (1 << (1 + (chan->idx * 16))); 165 u32 val = ~(1 << (chan->idx * 16));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168} 168}
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index fb64cf36ba61..eb6b54dbb806 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
580 580
581 sh_chan = to_sh_chan(chan); 581 sh_chan = to_sh_chan(chan);
582 param = chan->private; 582 param = chan->private;
583 slave_addr = param->config->addr;
584 583
585 /* Someone calling slave DMA on a public channel? */ 584 /* Someone calling slave DMA on a public channel? */
586 if (!param || !sg_len) { 585 if (!param || !sg_len) {
@@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
589 return NULL; 588 return NULL;
590 } 589 }
591 590
591 slave_addr = param->config->addr;
592
592 /* 593 /*
593 * if (param != NULL), this is a successfully requested slave channel, 594 * if (param != NULL), this is a successfully requested slave channel,
594 * therefore param->config != NULL too. 595 * therefore param->config != NULL too.
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308e7b81..6b21e25f7a84 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
339{ 339{
340 int status; 340 int status;
341 341
342 if (mci->op_state != OP_RUNNING_POLL)
343 return;
344
342 status = cancel_delayed_work(&mci->work); 345 status = cancel_delayed_work(&mci->work);
343 if (status == 0) { 346 if (status == 0) {
344 debugf0("%s() not canceled, flush the queue\n", 347 debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index e0187d16dd7c..0fd5b85a0f75 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1140 ATTR_COUNTER(0), 1140 ATTR_COUNTER(0),
1141 ATTR_COUNTER(1), 1141 ATTR_COUNTER(1),
1142 ATTR_COUNTER(2), 1142 ATTR_COUNTER(2),
1143 { .attr = { .name = NULL } }
1143}; 1144};
1144 1145
1145static struct mcidev_sysfs_group i7core_udimm_counters = { 1146static struct mcidev_sysfs_group i7core_udimm_counters = {
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index be29b0bb2471..1b05896648bc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@ static const struct {
263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, 263 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI},
264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 264 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, 265 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
266 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, 267 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
267}; 268};
268 269
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index b42f42ca70c3..823559ab0e24 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -459,17 +459,33 @@ static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
459 return err; 459 return err;
460} 460}
461 461
462static int sx150x_init_hw(struct sx150x_chip *chip, 462static int sx150x_reset(struct sx150x_chip *chip)
463 struct sx150x_platform_data *pdata)
464{ 463{
465 int err = 0; 464 int err;
466 465
467 err = i2c_smbus_write_word_data(chip->client, 466 err = i2c_smbus_write_byte_data(chip->client,
468 chip->dev_cfg->reg_reset, 467 chip->dev_cfg->reg_reset,
469 0x3412); 468 0x12);
470 if (err < 0) 469 if (err < 0)
471 return err; 470 return err;
472 471
472 err = i2c_smbus_write_byte_data(chip->client,
473 chip->dev_cfg->reg_reset,
474 0x34);
475 return err;
476}
477
478static int sx150x_init_hw(struct sx150x_chip *chip,
479 struct sx150x_platform_data *pdata)
480{
481 int err = 0;
482
483 if (pdata->reset_during_probe) {
484 err = sx150x_reset(chip);
485 if (err < 0)
486 return err;
487 }
488
473 err = sx150x_i2c_write(chip->client, 489 err = sx150x_i2c_write(chip->client,
474 chip->dev_cfg->reg_misc, 490 chip->dev_cfg->reg_misc,
475 0x01); 491 0x01);
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
98 * user_data: A pointer the data that is copied to the buffer. 98 * user_data: A pointer the data that is copied to the buffer.
99 * size: The Number of bytes to copy. 99 * size: The Number of bytes to copy.
100 */ 100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf, 101int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size) 102 void __user *user_data, int size)
103{ 103{
104 int nr_pages = size / PAGE_SIZE + 1; 104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx; 105 int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
163{ 163{
164 int idx = drm_buffer_index(buf); 164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf); 165 int page = drm_buffer_page(buf);
166 void *obj = 0; 166 void *obj = NULL;
167 167
168 if (idx + objsize <= PAGE_SIZE) { 168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx]; 169 obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2ab01e90a96..dcbeb98f195a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -103,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
103 if (connector->funcs->force) 103 if (connector->funcs->force)
104 connector->funcs->force(connector); 104 connector->funcs->force(connector);
105 } else { 105 } else {
106 connector->status = connector->funcs->detect(connector); 106 connector->status = connector->funcs->detect(connector, true);
107 drm_helper_hpd_irq_event(dev); 107 drm_kms_helper_poll_enable(dev);
108 } 108 }
109 109
110 if (connector->status == connector_status_disconnected) { 110 if (connector->status == connector_status_disconnected) {
@@ -637,13 +637,13 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
637 mode_changed = true; 637 mode_changed = true;
638 638
639 if (mode_changed) { 639 if (mode_changed) {
640 old_fb = set->crtc->fb;
641 set->crtc->fb = set->fb;
642 set->crtc->enabled = (set->mode != NULL); 640 set->crtc->enabled = (set->mode != NULL);
643 if (set->mode != NULL) { 641 if (set->mode != NULL) {
644 DRM_DEBUG_KMS("attempting to set mode from" 642 DRM_DEBUG_KMS("attempting to set mode from"
645 " userspace\n"); 643 " userspace\n");
646 drm_mode_debug_printmodeline(set->mode); 644 drm_mode_debug_printmodeline(set->mode);
645 old_fb = set->crtc->fb;
646 set->crtc->fb = set->fb;
647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode, 647 if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
648 set->x, set->y, 648 set->x, set->y,
649 old_fb)) { 649 old_fb)) {
@@ -866,7 +866,7 @@ static void output_poll_execute(struct work_struct *work)
866 !(connector->polled & DRM_CONNECTOR_POLL_HPD)) 866 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
867 continue; 867 continue;
868 868
869 status = connector->funcs->detect(connector); 869 status = connector->funcs->detect(connector, false);
870 if (old_status != status) 870 if (old_status != status)
871 changed = true; 871 changed = true;
872 } 872 }
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bf92d07510df..5663d2719063 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
148 return -ENOMEM; 148 return -ENOMEM;
149 149
150 kref_init(&obj->refcount); 150 kref_init(&obj->refcount);
151 kref_init(&obj->handlecount); 151 atomic_set(&obj->handle_count, 0);
152 obj->size = size; 152 obj->size = size;
153 153
154 atomic_inc(&dev->object_count); 154 atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
462} 462}
463EXPORT_SYMBOL(drm_gem_object_free); 463EXPORT_SYMBOL(drm_gem_object_free);
464 464
465/**
466 * Called after the last reference to the object has been lost.
467 * Must be called without holding struct_mutex
468 *
469 * Frees the object
470 */
471void
472drm_gem_object_free_unlocked(struct kref *kref)
473{
474 struct drm_gem_object *obj = (struct drm_gem_object *) kref;
475 struct drm_device *dev = obj->dev;
476
477 if (dev->driver->gem_free_object_unlocked != NULL)
478 dev->driver->gem_free_object_unlocked(obj);
479 else if (dev->driver->gem_free_object != NULL) {
480 mutex_lock(&dev->struct_mutex);
481 dev->driver->gem_free_object(obj);
482 mutex_unlock(&dev->struct_mutex);
483 }
484}
485EXPORT_SYMBOL(drm_gem_object_free_unlocked);
486
487static void drm_gem_object_ref_bug(struct kref *list_kref) 465static void drm_gem_object_ref_bug(struct kref *list_kref)
488{ 466{
489 BUG(); 467 BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
496 * called before drm_gem_object_free or we'll be touching 474 * called before drm_gem_object_free or we'll be touching
497 * freed memory 475 * freed memory
498 */ 476 */
499void 477void drm_gem_object_handle_free(struct drm_gem_object *obj)
500drm_gem_object_handle_free(struct kref *kref)
501{ 478{
502 struct drm_gem_object *obj = container_of(kref,
503 struct drm_gem_object,
504 handlecount);
505 struct drm_device *dev = obj->dev; 479 struct drm_device *dev = obj->dev;
506 480
507 /* Remove any name for this object */ 481 /* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
528 struct drm_gem_object *obj = vma->vm_private_data; 502 struct drm_gem_object *obj = vma->vm_private_data;
529 503
530 drm_gem_object_reference(obj); 504 drm_gem_object_reference(obj);
505
506 mutex_lock(&obj->dev->struct_mutex);
507 drm_vm_open_locked(vma);
508 mutex_unlock(&obj->dev->struct_mutex);
531} 509}
532EXPORT_SYMBOL(drm_gem_vm_open); 510EXPORT_SYMBOL(drm_gem_vm_open);
533 511
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
535{ 513{
536 struct drm_gem_object *obj = vma->vm_private_data; 514 struct drm_gem_object *obj = vma->vm_private_data;
537 515
538 drm_gem_object_unreference_unlocked(obj); 516 mutex_lock(&obj->dev->struct_mutex);
517 drm_vm_close_locked(vma);
518 drm_gem_object_unreference(obj);
519 mutex_unlock(&obj->dev->struct_mutex);
539} 520}
540EXPORT_SYMBOL(drm_gem_vm_close); 521EXPORT_SYMBOL(drm_gem_vm_close);
541 522
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 2ef2c7827243..974e970ce3f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
255 255
256 seq_printf(m, "%6d %8zd %7d %8d\n", 256 seq_printf(m, "%6d %8zd %7d %8d\n",
257 obj->name, obj->size, 257 obj->name, obj->size,
258 atomic_read(&obj->handlecount.refcount), 258 atomic_read(&obj->handle_count),
259 atomic_read(&obj->refcount.refcount)); 259 atomic_read(&obj->refcount.refcount));
260 return 0; 260 return 0;
261} 261}
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e20f78b542a7..f5bd9e590c80 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -164,6 +164,8 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
164 dev->hose = pdev->sysdata; 164 dev->hose = pdev->sysdata;
165#endif 165#endif
166 166
167 mutex_lock(&drm_global_mutex);
168
167 if ((ret = drm_fill_in_dev(dev, ent, driver))) { 169 if ((ret = drm_fill_in_dev(dev, ent, driver))) {
168 printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); 170 printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
169 goto err_g2; 171 goto err_g2;
@@ -199,6 +201,7 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
199 driver->name, driver->major, driver->minor, driver->patchlevel, 201 driver->name, driver->major, driver->minor, driver->patchlevel,
200 driver->date, pci_name(pdev), dev->primary->index); 202 driver->date, pci_name(pdev), dev->primary->index);
201 203
204 mutex_unlock(&drm_global_mutex);
202 return 0; 205 return 0;
203 206
204err_g4: 207err_g4:
@@ -210,6 +213,7 @@ err_g2:
210 pci_disable_device(pdev); 213 pci_disable_device(pdev);
211err_g1: 214err_g1:
212 kfree(dev); 215 kfree(dev);
216 mutex_unlock(&drm_global_mutex);
213 return ret; 217 return ret;
214} 218}
215EXPORT_SYMBOL(drm_get_pci_dev); 219EXPORT_SYMBOL(drm_get_pci_dev);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 460e9a3afa8d..92d1d0fb7b75 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -53,6 +53,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
53 dev->platformdev = platdev; 53 dev->platformdev = platdev;
54 dev->dev = &platdev->dev; 54 dev->dev = &platdev->dev;
55 55
56 mutex_lock(&drm_global_mutex);
57
56 ret = drm_fill_in_dev(dev, NULL, driver); 58 ret = drm_fill_in_dev(dev, NULL, driver);
57 59
58 if (ret) { 60 if (ret) {
@@ -87,6 +89,8 @@ int drm_get_platform_dev(struct platform_device *platdev,
87 89
88 list_add_tail(&dev->driver_item, &driver->device_list); 90 list_add_tail(&dev->driver_item, &driver->device_list);
89 91
92 mutex_unlock(&drm_global_mutex);
93
90 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 94 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
91 driver->name, driver->major, driver->minor, driver->patchlevel, 95 driver->name, driver->major, driver->minor, driver->patchlevel,
92 driver->date, dev->primary->index); 96 driver->date, dev->primary->index);
@@ -100,6 +104,7 @@ err_g2:
100 drm_put_minor(&dev->control); 104 drm_put_minor(&dev->control);
101err_g1: 105err_g1:
102 kfree(dev); 106 kfree(dev);
107 mutex_unlock(&drm_global_mutex);
103 return ret; 108 return ret;
104} 109}
105EXPORT_SYMBOL(drm_get_platform_dev); 110EXPORT_SYMBOL(drm_get_platform_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 86118a742231..85da4c40694c 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -159,7 +159,7 @@ static ssize_t status_show(struct device *device,
159 struct drm_connector *connector = to_drm_connector(device); 159 struct drm_connector *connector = to_drm_connector(device);
160 enum drm_connector_status status; 160 enum drm_connector_status status;
161 161
162 status = connector->funcs->detect(connector); 162 status = connector->funcs->detect(connector, true);
163 return snprintf(buf, PAGE_SIZE, "%s\n", 163 return snprintf(buf, PAGE_SIZE, "%s\n",
164 drm_get_connector_status_name(status)); 164 drm_get_connector_status_name(status));
165} 165}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index fda67468e603..5df450683aab 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
433 mutex_unlock(&dev->struct_mutex); 433 mutex_unlock(&dev->struct_mutex);
434} 434}
435 435
436/** 436void drm_vm_close_locked(struct vm_area_struct *vma)
437 * \c close method for all virtual memory types.
438 *
439 * \param vma virtual memory area.
440 *
441 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
442 * free it.
443 */
444static void drm_vm_close(struct vm_area_struct *vma)
445{ 437{
446 struct drm_file *priv = vma->vm_file->private_data; 438 struct drm_file *priv = vma->vm_file->private_data;
447 struct drm_device *dev = priv->minor->dev; 439 struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
451 vma->vm_start, vma->vm_end - vma->vm_start); 443 vma->vm_start, vma->vm_end - vma->vm_start);
452 atomic_dec(&dev->vma_count); 444 atomic_dec(&dev->vma_count);
453 445
454 mutex_lock(&dev->struct_mutex);
455 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { 446 list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
456 if (pt->vma == vma) { 447 if (pt->vma == vma) {
457 list_del(&pt->head); 448 list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
459 break; 450 break;
460 } 451 }
461 } 452 }
453}
454
455/**
456 * \c close method for all virtual memory types.
457 *
458 * \param vma virtual memory area.
459 *
460 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
461 * free it.
462 */
463static void drm_vm_close(struct vm_area_struct *vma)
464{
465 struct drm_file *priv = vma->vm_file->private_data;
466 struct drm_device *dev = priv->minor->dev;
467
468 mutex_lock(&dev->struct_mutex);
469 drm_vm_close_locked(vma);
462 mutex_unlock(&dev->struct_mutex); 470 mutex_unlock(&dev->struct_mutex);
463} 471}
464 472
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 61b4caf220fa..fb07e73581e8 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
116static const struct file_operations i810_buffer_fops = { 116static const struct file_operations i810_buffer_fops = {
117 .open = drm_open, 117 .open = drm_open,
118 .release = drm_release, 118 .release = drm_release,
119 .unlocked_ioctl = drm_ioctl, 119 .unlocked_ioctl = i810_ioctl,
120 .mmap = i810_mmap_buffers, 120 .mmap = i810_mmap_buffers,
121 .fasync = drm_fasync, 121 .fasync = drm_fasync,
122}; 122};
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 671aa18415ac..cc92c7e6236f 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
118static const struct file_operations i830_buffer_fops = { 118static const struct file_operations i830_buffer_fops = {
119 .open = drm_open, 119 .open = drm_open,
120 .release = drm_release, 120 .release = drm_release,
121 .unlocked_ioctl = drm_ioctl, 121 .unlocked_ioctl = i830_ioctl,
122 .mmap = i830_mmap_buffers, 122 .mmap = i830_mmap_buffers,
123 .fasync = drm_fasync, 123 .fasync = drm_fasync,
124}; 124};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9d67b4853030..c74e4e8006d4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
1787 } 1787 }
1788 } 1788 }
1789 1789
1790 div_u64(diff, diff1); 1790 diff = div_u64(diff, diff1);
1791 ret = ((m * diff) + c); 1791 ret = ((m * diff) + c);
1792 div_u64(ret, 10); 1792 ret = div_u64(ret, 10);
1793 1793
1794 dev_priv->last_count1 = total_count; 1794 dev_priv->last_count1 = total_count;
1795 dev_priv->last_time1 = now; 1795 dev_priv->last_time1 = now;
@@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
1858 1858
1859 /* More magic constants... */ 1859 /* More magic constants... */
1860 diff = diff * 1181; 1860 diff = diff * 1181;
1861 div_u64(diff, diffms * 10); 1861 diff = div_u64(diff, diffms * 10);
1862 dev_priv->gfx_power = diff; 1862 dev_priv->gfx_power = diff;
1863} 1863}
1864 1864
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 216deb579785..6dbe14cc4f74 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 170 INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 171 INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 172 INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
173 INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
173 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 174 INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
174 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 175 INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
175 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 176 INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 16fca1d1799a..90b1d6753b9d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
136 return -ENOMEM; 136 return -ENOMEM;
137 137
138 ret = drm_gem_handle_create(file_priv, obj, &handle); 138 ret = drm_gem_handle_create(file_priv, obj, &handle);
139 /* drop reference from allocate - handle holds it now */
140 drm_gem_object_unreference_unlocked(obj);
139 if (ret) { 141 if (ret) {
140 drm_gem_object_unreference_unlocked(obj);
141 return ret; 142 return ret;
142 } 143 }
143 144
144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
146
147 args->handle = handle; 145 args->handle = handle;
148 return 0; 146 return 0;
149} 147}
@@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
471 return -ENOENT; 469 return -ENOENT;
472 obj_priv = to_intel_bo(obj); 470 obj_priv = to_intel_bo(obj);
473 471
474 /* Bounds check source. 472 /* Bounds check source. */
475 * 473 if (args->offset > obj->size || args->size > obj->size - args->offset) {
476 * XXX: This could use review for overflow issues... 474 ret = -EINVAL;
477 */ 475 goto err;
478 if (args->offset > obj->size || args->size > obj->size || 476 }
479 args->offset + args->size > obj->size) { 477
480 drm_gem_object_unreference_unlocked(obj); 478 if (!access_ok(VERIFY_WRITE,
481 return -EINVAL; 479 (char __user *)(uintptr_t)args->data_ptr,
480 args->size)) {
481 ret = -EFAULT;
482 goto err;
482 } 483 }
483 484
484 if (i915_gem_object_needs_bit17_swizzle(obj)) { 485 if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
490 file_priv); 491 file_priv);
491 } 492 }
492 493
494err:
493 drm_gem_object_unreference_unlocked(obj); 495 drm_gem_object_unreference_unlocked(obj);
494
495 return ret; 496 return ret;
496} 497}
497 498
@@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
580 581
581 user_data = (char __user *) (uintptr_t) args->data_ptr; 582 user_data = (char __user *) (uintptr_t) args->data_ptr;
582 remain = args->size; 583 remain = args->size;
583 if (!access_ok(VERIFY_READ, user_data, remain))
584 return -EFAULT;
585 584
586 585
587 mutex_lock(&dev->struct_mutex); 586 mutex_lock(&dev->struct_mutex);
@@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
934 return -ENOENT; 933 return -ENOENT;
935 obj_priv = to_intel_bo(obj); 934 obj_priv = to_intel_bo(obj);
936 935
937 /* Bounds check destination. 936 /* Bounds check destination. */
938 * 937 if (args->offset > obj->size || args->size > obj->size - args->offset) {
939 * XXX: This could use review for overflow issues... 938 ret = -EINVAL;
940 */ 939 goto err;
941 if (args->offset > obj->size || args->size > obj->size || 940 }
942 args->offset + args->size > obj->size) { 941
943 drm_gem_object_unreference_unlocked(obj); 942 if (!access_ok(VERIFY_READ,
944 return -EINVAL; 943 (char __user *)(uintptr_t)args->data_ptr,
944 args->size)) {
945 ret = -EFAULT;
946 goto err;
945 } 947 }
946 948
947 /* We can only do the GTT pwrite on untiled buffers, as otherwise 949 /* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
975 DRM_INFO("pwrite failed %d\n", ret); 977 DRM_INFO("pwrite failed %d\n", ret);
976#endif 978#endif
977 979
980err:
978 drm_gem_object_unreference_unlocked(obj); 981 drm_gem_object_unreference_unlocked(obj);
979
980 return ret; 982 return ret;
981} 983}
982 984
@@ -2351,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2351 2353
2352 reg->obj = obj; 2354 reg->obj = obj;
2353 2355
2354 if (IS_GEN6(dev)) 2356 switch (INTEL_INFO(dev)->gen) {
2357 case 6:
2355 sandybridge_write_fence_reg(reg); 2358 sandybridge_write_fence_reg(reg);
2356 else if (IS_I965G(dev)) 2359 break;
2360 case 5:
2361 case 4:
2357 i965_write_fence_reg(reg); 2362 i965_write_fence_reg(reg);
2358 else if (IS_I9XX(dev)) 2363 break;
2364 case 3:
2359 i915_write_fence_reg(reg); 2365 i915_write_fence_reg(reg);
2360 else 2366 break;
2367 case 2:
2361 i830_write_fence_reg(reg); 2368 i830_write_fence_reg(reg);
2369 break;
2370 }
2362 2371
2363 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, 2372 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
2364 obj_priv->tiling_mode); 2373 obj_priv->tiling_mode);
@@ -2381,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2381 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2390 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2382 struct drm_i915_fence_reg *reg = 2391 struct drm_i915_fence_reg *reg =
2383 &dev_priv->fence_regs[obj_priv->fence_reg]; 2392 &dev_priv->fence_regs[obj_priv->fence_reg];
2393 uint32_t fence_reg;
2384 2394
2385 if (IS_GEN6(dev)) { 2395 switch (INTEL_INFO(dev)->gen) {
2396 case 6:
2386 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2397 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
2387 (obj_priv->fence_reg * 8), 0); 2398 (obj_priv->fence_reg * 8), 0);
2388 } else if (IS_I965G(dev)) { 2399 break;
2400 case 5:
2401 case 4:
2389 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2402 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2390 } else { 2403 break;
2391 uint32_t fence_reg; 2404 case 3:
2392 2405 if (obj_priv->fence_reg >= 8)
2393 if (obj_priv->fence_reg < 8) 2406 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2394 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2395 else 2407 else
2396 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 2408 case 2:
2397 8) * 4; 2409 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
2398 2410
2399 I915_WRITE(fence_reg, 0); 2411 I915_WRITE(fence_reg, 0);
2412 break;
2400 } 2413 }
2401 2414
2402 reg->obj = NULL; 2415 reg->obj = NULL;
@@ -3247,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3247 (int) reloc->offset, 3260 (int) reloc->offset,
3248 reloc->read_domains, 3261 reloc->read_domains,
3249 reloc->write_domain); 3262 reloc->write_domain);
3263 drm_gem_object_unreference(target_obj);
3264 i915_gem_object_unpin(obj);
3250 return -EINVAL; 3265 return -EINVAL;
3251 } 3266 }
3252 if (reloc->write_domain & I915_GEM_DOMAIN_CPU || 3267 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 72cae3cccad8..5c428fa3e0b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
79 struct list_head *unwind) 79 struct list_head *unwind)
80{ 80{
81 list_add(&obj_priv->evict_list, unwind); 81 list_add(&obj_priv->evict_list, unwind);
82 drm_gem_object_reference(&obj_priv->base);
82 return drm_mm_scan_add_block(obj_priv->gtt_space); 83 return drm_mm_scan_add_block(obj_priv->gtt_space);
83} 84}
84 85
@@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
92{ 93{
93 drm_i915_private_t *dev_priv = dev->dev_private; 94 drm_i915_private_t *dev_priv = dev->dev_private;
94 struct list_head eviction_list, unwind_list; 95 struct list_head eviction_list, unwind_list;
95 struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; 96 struct drm_i915_gem_object *obj_priv;
96 struct list_head *render_iter, *bsd_iter; 97 struct list_head *render_iter, *bsd_iter;
97 int ret = 0; 98 int ret = 0;
98 99
@@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
165 list_for_each_entry(obj_priv, &unwind_list, evict_list) { 166 list_for_each_entry(obj_priv, &unwind_list, evict_list) {
166 ret = drm_mm_scan_remove_block(obj_priv->gtt_space); 167 ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
167 BUG_ON(ret); 168 BUG_ON(ret);
169 drm_gem_object_unreference(&obj_priv->base);
168 } 170 }
169 171
170 /* We expect the caller to unpin, evict all and try again, or give up. 172 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
173 return -ENOSPC; 175 return -ENOSPC;
174 176
175found: 177found:
178 /* drm_mm doesn't allow any other other operations while
179 * scanning, therefore store to be evicted objects on a
180 * temporary list. */
176 INIT_LIST_HEAD(&eviction_list); 181 INIT_LIST_HEAD(&eviction_list);
177 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 182 while (!list_empty(&unwind_list)) {
178 &unwind_list, evict_list) { 183 obj_priv = list_first_entry(&unwind_list,
184 struct drm_i915_gem_object,
185 evict_list);
179 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 186 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
180 /* drm_mm doesn't allow any other other operations while
181 * scanning, therefore store to be evicted objects on a
182 * temporary list. */
183 list_move(&obj_priv->evict_list, &eviction_list); 187 list_move(&obj_priv->evict_list, &eviction_list);
188 continue;
184 } 189 }
190 list_del(&obj_priv->evict_list);
191 drm_gem_object_unreference(&obj_priv->base);
185 } 192 }
186 193
187 /* Unbinding will emit any required flushes */ 194 /* Unbinding will emit any required flushes */
188 list_for_each_entry_safe(obj_priv, tmp_obj_priv, 195 while (!list_empty(&eviction_list)) {
189 &eviction_list, evict_list) { 196 obj_priv = list_first_entry(&eviction_list,
190#if WATCH_LRU 197 struct drm_i915_gem_object,
191 DRM_INFO("%s: evicting %p\n", __func__, obj); 198 evict_list);
192#endif 199 if (ret == 0)
193 ret = i915_gem_object_unbind(&obj_priv->base); 200 ret = i915_gem_object_unbind(&obj_priv->base);
194 if (ret) 201 list_del(&obj_priv->evict_list);
195 return ret; 202 drm_gem_object_unreference(&obj_priv->base);
196 } 203 }
197 204
198 /* The just created free hole should be on the top of the free stack 205 return ret;
199 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
200 * Furthermore all accessed data has just recently been used, so it
201 * should be really fast, too. */
202 BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
203 alignment, 0));
204
205 return 0;
206} 206}
207 207
208int 208int
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 59457e83b011..744225ebb4b2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1350,17 +1350,25 @@ void i915_hangcheck_elapsed(unsigned long data)
1350 i915_seqno_passed(i915_get_gem_seqno(dev, 1350 i915_seqno_passed(i915_get_gem_seqno(dev,
1351 &dev_priv->render_ring), 1351 &dev_priv->render_ring),
1352 i915_get_tail_request(dev)->seqno)) { 1352 i915_get_tail_request(dev)->seqno)) {
1353 bool missed_wakeup = false;
1354
1353 dev_priv->hangcheck_count = 0; 1355 dev_priv->hangcheck_count = 0;
1354 1356
1355 /* Issue a wake-up to catch stuck h/w. */ 1357 /* Issue a wake-up to catch stuck h/w. */
1356 if (dev_priv->render_ring.waiting_gem_seqno | 1358 if (dev_priv->render_ring.waiting_gem_seqno &&
1357 dev_priv->bsd_ring.waiting_gem_seqno) { 1359 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1358 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n"); 1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1359 if (dev_priv->render_ring.waiting_gem_seqno) 1361 missed_wakeup = true;
1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue); 1362 }
1361 if (dev_priv->bsd_ring.waiting_gem_seqno) 1363
1362 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue); 1364 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1365 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1366 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1367 missed_wakeup = true;
1363 } 1368 }
1369
1370 if (missed_wakeup)
1371 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1364 return; 1372 return;
1365 } 1373 }
1366 1374
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d094e9129223..4f5e15577e89 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2206,9 +2206,17 @@
2206#define WM1_LP_SR_EN (1<<31) 2206#define WM1_LP_SR_EN (1<<31)
2207#define WM1_LP_LATENCY_SHIFT 24 2207#define WM1_LP_LATENCY_SHIFT 24
2208#define WM1_LP_LATENCY_MASK (0x7f<<24) 2208#define WM1_LP_LATENCY_MASK (0x7f<<24)
2209#define WM1_LP_FBC_LP1_MASK (0xf<<20)
2210#define WM1_LP_FBC_LP1_SHIFT 20
2209#define WM1_LP_SR_MASK (0x1ff<<8) 2211#define WM1_LP_SR_MASK (0x1ff<<8)
2210#define WM1_LP_SR_SHIFT 8 2212#define WM1_LP_SR_SHIFT 8
2211#define WM1_LP_CURSOR_MASK (0x3f) 2213#define WM1_LP_CURSOR_MASK (0x3f)
2214#define WM2_LP_ILK 0x4510c
2215#define WM2_LP_EN (1<<31)
2216#define WM3_LP_ILK 0x45110
2217#define WM3_LP_EN (1<<31)
2218#define WM1S_LP_ILK 0x45120
2219#define WM1S_LP_EN (1<<31)
2212 2220
2213/* Memory latency timer register */ 2221/* Memory latency timer register */
2214#define MLTR_ILK 0x11222 2222#define MLTR_ILK 0x11222
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2c6b98f2440e..31f08581e93a 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
790 790
791 /* Fences */ 791 /* Fences */
792 if (IS_I965G(dev)) { 792 switch (INTEL_INFO(dev)->gen) {
793 case 6:
794 for (i = 0; i < 16; i++)
795 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
796 break;
797 case 5:
798 case 4:
793 for (i = 0; i < 16; i++) 799 for (i = 0; i < 16; i++)
794 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 800 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
795 } else { 801 break;
796 for (i = 0; i < 8; i++) 802 case 3:
797 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
798
799 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 803 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
800 for (i = 0; i < 8; i++) 804 for (i = 0; i < 8; i++)
801 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 805 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
806 case 2:
807 for (i = 0; i < 8; i++)
808 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
809 break;
810
802 } 811 }
803 812
804 return 0; 813 return 0;
@@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
815 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 824 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
816 825
817 /* Fences */ 826 /* Fences */
818 if (IS_I965G(dev)) { 827 switch (INTEL_INFO(dev)->gen) {
828 case 6:
829 for (i = 0; i < 16; i++)
830 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
831 break;
832 case 5:
833 case 4:
819 for (i = 0; i < 16; i++) 834 for (i = 0; i < 16; i++)
820 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 835 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
821 } else { 836 break;
822 for (i = 0; i < 8; i++) 837 case 3:
823 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 838 case 2:
824 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 839 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
825 for (i = 0; i < 8; i++) 840 for (i = 0; i < 8; i++)
826 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 841 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
842 for (i = 0; i < 8; i++)
843 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
844 break;
827 } 845 }
828 846
829 i915_restore_display(dev); 847 i915_restore_display(dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b7735196cd5..197d4f32585a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
188 188
189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 189 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
190 1000, 1)) 190 1000, 1))
191 DRM_ERROR("timed out waiting for FORCE_TRIGGER"); 191 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
192 192
193 if (turn_off_dac) { 193 if (turn_off_dac) {
194 I915_WRITE(PCH_ADPA, temp); 194 I915_WRITE(PCH_ADPA, temp);
@@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 245 if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
246 CRT_HOTPLUG_FORCE_DETECT) == 0, 246 CRT_HOTPLUG_FORCE_DETECT) == 0,
247 1000, 1)) 247 1000, 1))
248 DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); 248 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
249 } 249 }
250 250
251 stat = I915_READ(PORT_HOTPLUG_STAT); 251 stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -400,7 +400,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
400 return status; 400 return status;
401} 401}
402 402
403static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 403static enum drm_connector_status
404intel_crt_detect(struct drm_connector *connector, bool force)
404{ 405{
405 struct drm_device *dev = connector->dev; 406 struct drm_device *dev = connector->dev;
406 struct drm_encoder *encoder = intel_attached_encoder(connector); 407 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -419,6 +420,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
419 if (intel_crt_detect_ddc(encoder)) 420 if (intel_crt_detect_ddc(encoder))
420 return connector_status_connected; 421 return connector_status_connected;
421 422
423 if (!force)
424 return connector->status;
425
422 /* for pre-945g platforms use load detect */ 426 /* for pre-945g platforms use load detect */
423 if (encoder->crtc && encoder->crtc->enabled) { 427 if (encoder->crtc && encoder->crtc->enabled) {
424 status = intel_crt_load_detect(encoder->crtc, intel_encoder); 428 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 40cc5da264a9..979228594599 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1013 DRM_DEBUG_KMS("vblank wait timed out\n"); 1013 DRM_DEBUG_KMS("vblank wait timed out\n");
1014} 1014}
1015 1015
1016/** 1016/*
1017 * intel_wait_for_vblank_off - wait for vblank after disabling a pipe 1017 * intel_wait_for_pipe_off - wait for pipe to turn off
1018 * @dev: drm device 1018 * @dev: drm device
1019 * @pipe: pipe to wait for 1019 * @pipe: pipe to wait for
1020 * 1020 *
@@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
1022 * spinning on the vblank interrupt status bit, since we won't actually 1022 * spinning on the vblank interrupt status bit, since we won't actually
1023 * see an interrupt when the pipe is disabled. 1023 * see an interrupt when the pipe is disabled.
1024 * 1024 *
1025 * So this function waits for the display line value to settle (it 1025 * On Gen4 and above:
1026 * usually ends up stopping at the start of the next frame). 1026 * wait for the pipe register state bit to turn off
1027 *
1028 * Otherwise:
1029 * wait for the display line value to settle (it usually
1030 * ends up stopping at the start of the next frame).
1031 *
1027 */ 1032 */
1028void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) 1033static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1029{ 1034{
1030 struct drm_i915_private *dev_priv = dev->dev_private; 1035 struct drm_i915_private *dev_priv = dev->dev_private;
1031 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); 1036
1032 unsigned long timeout = jiffies + msecs_to_jiffies(100); 1037 if (INTEL_INFO(dev)->gen >= 4) {
1033 u32 last_line; 1038 int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF);
1034 1039
1035 /* Wait for the display line to settle */ 1040 /* Wait for the Pipe State to go off */
1036 do { 1041 if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0,
1037 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; 1042 100, 0))
1038 mdelay(5); 1043 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1039 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && 1044 } else {
1040 time_after(timeout, jiffies)); 1045 u32 last_line;
1041 1046 int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
1042 if (time_after(jiffies, timeout)) 1047 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1043 DRM_DEBUG_KMS("vblank wait timed out\n"); 1048
1049 /* Wait for the display line to settle */
1050 do {
1051 last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
1052 mdelay(5);
1053 } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
1054 time_after(timeout, jiffies));
1055 if (time_after(jiffies, timeout))
1056 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1057 }
1044} 1058}
1045 1059
1046/* Parameters have changed, update FBC info */ 1060/* Parameters have changed, update FBC info */
@@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2328 I915_READ(dspbase_reg); 2342 I915_READ(dspbase_reg);
2329 } 2343 }
2330 2344
2331 /* Wait for vblank for the disable to take effect */
2332 intel_wait_for_vblank_off(dev, pipe);
2333
2334 /* Don't disable pipe A or pipe A PLLs if needed */ 2345 /* Don't disable pipe A or pipe A PLLs if needed */
2335 if (pipeconf_reg == PIPEACONF && 2346 if (pipeconf_reg == PIPEACONF &&
2336 (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 2347 (dev_priv->quirks & QUIRK_PIPEA_FORCE)) {
2348 /* Wait for vblank for the disable to take effect */
2349 intel_wait_for_vblank(dev, pipe);
2337 goto skip_pipe_off; 2350 goto skip_pipe_off;
2351 }
2338 2352
2339 /* Next, disable display pipes */ 2353 /* Next, disable display pipes */
2340 temp = I915_READ(pipeconf_reg); 2354 temp = I915_READ(pipeconf_reg);
@@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2343 I915_READ(pipeconf_reg); 2357 I915_READ(pipeconf_reg);
2344 } 2358 }
2345 2359
2346 /* Wait for vblank for the disable to take effect. */ 2360 /* Wait for the pipe to turn off */
2347 intel_wait_for_vblank_off(dev, pipe); 2361 intel_wait_for_pipe_off(dev, pipe);
2348 2362
2349 temp = I915_READ(dpll_reg); 2363 temp = I915_READ(dpll_reg);
2350 if ((temp & DPLL_VCO_ENABLE) != 0) { 2364 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2463,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2463 struct drm_display_mode *adjusted_mode) 2477 struct drm_display_mode *adjusted_mode)
2464{ 2478{
2465 struct drm_device *dev = crtc->dev; 2479 struct drm_device *dev = crtc->dev;
2480
2466 if (HAS_PCH_SPLIT(dev)) { 2481 if (HAS_PCH_SPLIT(dev)) {
2467 /* FDI link clock is fixed at 2.7G */ 2482 /* FDI link clock is fixed at 2.7G */
2468 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) 2483 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
2469 return false; 2484 return false;
2470 } 2485 }
2486
2487 /* XXX some encoders set the crtcinfo, others don't.
2488 * Obviously we need some form of conflict resolution here...
2489 */
2490 if (adjusted_mode->crtc_htotal == 0)
2491 drm_mode_set_crtcinfo(adjusted_mode, 0);
2492
2471 return true; 2493 return true;
2472} 2494}
2473 2495
@@ -2767,14 +2789,8 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2767 /* Don't promote wm_size to unsigned... */ 2789 /* Don't promote wm_size to unsigned... */
2768 if (wm_size > (long)wm->max_wm) 2790 if (wm_size > (long)wm->max_wm)
2769 wm_size = wm->max_wm; 2791 wm_size = wm->max_wm;
2770 if (wm_size <= 0) { 2792 if (wm_size <= 0)
2771 wm_size = wm->default_wm; 2793 wm_size = wm->default_wm;
2772 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2773 " entries required = %ld, available = %lu.\n",
2774 entries_required + wm->guard_size,
2775 wm->fifo_size);
2776 }
2777
2778 return wm_size; 2794 return wm_size;
2779} 2795}
2780 2796
@@ -3388,8 +3404,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3388 reg_value = I915_READ(WM1_LP_ILK); 3404 reg_value = I915_READ(WM1_LP_ILK);
3389 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | 3405 reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK |
3390 WM1_LP_CURSOR_MASK); 3406 WM1_LP_CURSOR_MASK);
3391 reg_value |= WM1_LP_SR_EN | 3407 reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3392 (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
3393 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; 3408 (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm;
3394 3409
3395 I915_WRITE(WM1_LP_ILK, reg_value); 3410 I915_WRITE(WM1_LP_ILK, reg_value);
@@ -5675,6 +5690,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5675 I915_WRITE(DISP_ARB_CTL, 5690 I915_WRITE(DISP_ARB_CTL,
5676 (I915_READ(DISP_ARB_CTL) | 5691 (I915_READ(DISP_ARB_CTL) |
5677 DISP_FBC_WM_DIS)); 5692 DISP_FBC_WM_DIS));
5693 I915_WRITE(WM3_LP_ILK, 0);
5694 I915_WRITE(WM2_LP_ILK, 0);
5695 I915_WRITE(WM1_LP_ILK, 0);
5678 } 5696 }
5679 /* 5697 /*
5680 * Based on the document from hardware guys the following bits 5698 * Based on the document from hardware guys the following bits
@@ -5696,8 +5714,7 @@ void intel_init_clock_gating(struct drm_device *dev)
5696 ILK_DPFC_DIS2 | 5714 ILK_DPFC_DIS2 |
5697 ILK_CLK_FBC); 5715 ILK_CLK_FBC);
5698 } 5716 }
5699 if (IS_GEN6(dev)) 5717 return;
5700 return;
5701 } else if (IS_G4X(dev)) { 5718 } else if (IS_G4X(dev)) {
5702 uint32_t dspclk_gate; 5719 uint32_t dspclk_gate;
5703 I915_WRITE(RENCLK_GATE_D1, 0); 5720 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5758,11 +5775,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5758 OUT_RING(MI_FLUSH); 5775 OUT_RING(MI_FLUSH);
5759 ADVANCE_LP_RING(); 5776 ADVANCE_LP_RING();
5760 } 5777 }
5761 } else { 5778 } else
5762 DRM_DEBUG_KMS("Failed to allocate render context." 5779 DRM_DEBUG_KMS("Failed to allocate render context."
5763 "Disable RC6\n"); 5780 "Disable RC6\n");
5764 return;
5765 }
5766 } 5781 }
5767 5782
5768 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 5783 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51d142939a26..9ab8708ac6ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1138,18 +1138,14 @@ static bool
1138intel_dp_set_link_train(struct intel_dp *intel_dp, 1138intel_dp_set_link_train(struct intel_dp *intel_dp,
1139 uint32_t dp_reg_value, 1139 uint32_t dp_reg_value,
1140 uint8_t dp_train_pat, 1140 uint8_t dp_train_pat,
1141 uint8_t train_set[4], 1141 uint8_t train_set[4])
1142 bool first)
1143{ 1142{
1144 struct drm_device *dev = intel_dp->base.enc.dev; 1143 struct drm_device *dev = intel_dp->base.enc.dev;
1145 struct drm_i915_private *dev_priv = dev->dev_private; 1144 struct drm_i915_private *dev_priv = dev->dev_private;
1146 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1147 int ret; 1145 int ret;
1148 1146
1149 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1147 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1150 POSTING_READ(intel_dp->output_reg); 1148 POSTING_READ(intel_dp->output_reg);
1151 if (first)
1152 intel_wait_for_vblank(dev, intel_crtc->pipe);
1153 1149
1154 intel_dp_aux_native_write_1(intel_dp, 1150 intel_dp_aux_native_write_1(intel_dp,
1155 DP_TRAINING_PATTERN_SET, 1151 DP_TRAINING_PATTERN_SET,
@@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1174 uint8_t voltage; 1170 uint8_t voltage;
1175 bool clock_recovery = false; 1171 bool clock_recovery = false;
1176 bool channel_eq = false; 1172 bool channel_eq = false;
1177 bool first = true;
1178 int tries; 1173 int tries;
1179 u32 reg; 1174 u32 reg;
1180 uint32_t DP = intel_dp->DP; 1175 uint32_t DP = intel_dp->DP;
1176 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1177
1178 /* Enable output, wait for it to become active */
1179 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1180 POSTING_READ(intel_dp->output_reg);
1181 intel_wait_for_vblank(dev, intel_crtc->pipe);
1181 1182
1182 /* Write the link configuration data */ 1183 /* Write the link configuration data */
1183 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1184 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
@@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1210 reg = DP | DP_LINK_TRAIN_PAT_1; 1211 reg = DP | DP_LINK_TRAIN_PAT_1;
1211 1212
1212 if (!intel_dp_set_link_train(intel_dp, reg, 1213 if (!intel_dp_set_link_train(intel_dp, reg,
1213 DP_TRAINING_PATTERN_1, train_set, first)) 1214 DP_TRAINING_PATTERN_1, train_set))
1214 break; 1215 break;
1215 first = false;
1216 /* Set training pattern 1 */ 1216 /* Set training pattern 1 */
1217 1217
1218 udelay(100); 1218 udelay(100);
@@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp *intel_dp)
1266 1266
1267 /* channel eq pattern */ 1267 /* channel eq pattern */
1268 if (!intel_dp_set_link_train(intel_dp, reg, 1268 if (!intel_dp_set_link_train(intel_dp, reg,
1269 DP_TRAINING_PATTERN_2, train_set, 1269 DP_TRAINING_PATTERN_2, train_set))
1270 false))
1271 break; 1270 break;
1272 1271
1273 udelay(400); 1272 udelay(400);
@@ -1386,7 +1385,7 @@ ironlake_dp_detect(struct drm_connector *connector)
1386 * \return false if DP port is disconnected. 1385 * \return false if DP port is disconnected.
1387 */ 1386 */
1388static enum drm_connector_status 1387static enum drm_connector_status
1389intel_dp_detect(struct drm_connector *connector) 1388intel_dp_detect(struct drm_connector *connector, bool force)
1390{ 1389{
1391 struct drm_encoder *encoder = intel_attached_encoder(connector); 1390 struct drm_encoder *encoder = intel_attached_encoder(connector);
1392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1391 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ad312ca6b3e5..8828b3ac6414 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
229 struct drm_crtc *crtc); 229 struct drm_crtc *crtc);
230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
231 struct drm_file *file_priv); 231 struct drm_file *file_priv);
232extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
233extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 232extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
234extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 233extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
235extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 234extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a399f4b2c1c5..7c9ec1472d46 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -221,7 +221,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
221 * 221 *
222 * Unimplemented. 222 * Unimplemented.
223 */ 223 */
224static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 224static enum drm_connector_status
225intel_dvo_detect(struct drm_connector *connector, bool force)
225{ 226{
226 struct drm_encoder *encoder = intel_attached_encoder(connector); 227 struct drm_encoder *encoder = intel_attached_encoder(connector);
227 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 228 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7bdc96256bf5..56ad9df2ccb5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
237 drm_fb_helper_fini(&ifbdev->helper); 237 drm_fb_helper_fini(&ifbdev->helper);
238 238
239 drm_framebuffer_cleanup(&ifb->base); 239 drm_framebuffer_cleanup(&ifb->base);
240 if (ifb->obj) 240 if (ifb->obj) {
241 drm_gem_object_handle_unreference(ifb->obj);
241 drm_gem_object_unreference(ifb->obj); 242 drm_gem_object_unreference(ifb->obj);
243 }
242 244
243 return 0; 245 return 0;
244} 246}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ccd4c97e6524..926934a482ec 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -139,7 +139,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
139} 139}
140 140
141static enum drm_connector_status 141static enum drm_connector_status
142intel_hdmi_detect(struct drm_connector *connector) 142intel_hdmi_detect(struct drm_connector *connector, bool force)
143{ 143{
144 struct drm_encoder *encoder = intel_attached_encoder(connector); 144 struct drm_encoder *encoder = intel_attached_encoder(connector);
145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b819c1081147..6ec39a86ed06 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -445,7 +445,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
445 * connected and closed means disconnected. We also send hotplug events as 445 * connected and closed means disconnected. We also send hotplug events as
446 * needed, using lid status notification from the input layer. 446 * needed, using lid status notification from the input layer.
447 */ 447 */
448static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) 448static enum drm_connector_status
449intel_lvds_detect(struct drm_connector *connector, bool force)
449{ 450{
450 struct drm_device *dev = connector->dev; 451 struct drm_device *dev = connector->dev;
451 enum drm_connector_status status = connector_status_connected; 452 enum drm_connector_status status = connector_status_connected;
@@ -540,7 +541,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
540 * the LID nofication event. 541 * the LID nofication event.
541 */ 542 */
542 if (connector) 543 if (connector)
543 connector->status = connector->funcs->detect(connector); 544 connector->status = connector->funcs->detect(connector,
545 false);
546
544 /* Don't force modeset on machines where it causes a GPU lockup */ 547 /* Don't force modeset on machines where it causes a GPU lockup */
545 if (dmi_check_system(intel_no_modeset_on_lid)) 548 if (dmi_check_system(intel_no_modeset_on_lid))
546 return NOTIFY_OK; 549 return NOTIFY_OK;
@@ -875,8 +878,6 @@ void intel_lvds_init(struct drm_device *dev)
875 878
876 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 879 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
877 intel_encoder->crtc_mask = (1 << 1); 880 intel_encoder->crtc_mask = (1 << 1);
878 if (IS_I965G(dev))
879 intel_encoder->crtc_mask |= (1 << 0);
880 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 881 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
881 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 882 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
882 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 883 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e3b7a7ee39cb..ee73e428a84a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1417,7 +1417,7 @@ intel_analog_is_connected(struct drm_device *dev)
1417 if (!analog_connector) 1417 if (!analog_connector)
1418 return false; 1418 return false;
1419 1419
1420 if (analog_connector->funcs->detect(analog_connector) == 1420 if (analog_connector->funcs->detect(analog_connector, false) ==
1421 connector_status_disconnected) 1421 connector_status_disconnected)
1422 return false; 1422 return false;
1423 1423
@@ -1486,7 +1486,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1486 return status; 1486 return status;
1487} 1487}
1488 1488
1489static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1489static enum drm_connector_status
1490intel_sdvo_detect(struct drm_connector *connector, bool force)
1490{ 1491{
1491 uint16_t response; 1492 uint16_t response;
1492 struct drm_encoder *encoder = intel_attached_encoder(connector); 1493 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -2169,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2169 return true; 2170 return true;
2170 2171
2171err: 2172err:
2172 intel_sdvo_destroy_enhance_property(connector); 2173 intel_sdvo_destroy(connector);
2173 kfree(intel_sdvo_connector);
2174 return false; 2174 return false;
2175} 2175}
2176 2176
@@ -2242,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2242 return true; 2242 return true;
2243 2243
2244err: 2244err:
2245 intel_sdvo_destroy_enhance_property(connector); 2245 intel_sdvo_destroy(connector);
2246 kfree(intel_sdvo_connector);
2247 return false; 2246 return false;
2248} 2247}
2249 2248
@@ -2521,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2521 uint16_t response; 2520 uint16_t response;
2522 } enhancements; 2521 } enhancements;
2523 2522
2524 if (!intel_sdvo_get_value(intel_sdvo, 2523 enhancements.response = 0;
2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2524 intel_sdvo_get_value(intel_sdvo,
2526 &enhancements, sizeof(enhancements))) 2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2527 return false; 2526 &enhancements, sizeof(enhancements));
2528
2529 if (enhancements.response == 0) { 2527 if (enhancements.response == 0) {
2530 DRM_DEBUG_KMS("No enhancement is supported\n"); 2528 DRM_DEBUG_KMS("No enhancement is supported\n");
2531 return true; 2529 return true;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c671f60ce80b..4a117e318a73 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1341,7 +1341,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1341 * we have a pipe programmed in order to probe the TV. 1341 * we have a pipe programmed in order to probe the TV.
1342 */ 1342 */
1343static enum drm_connector_status 1343static enum drm_connector_status
1344intel_tv_detect(struct drm_connector *connector) 1344intel_tv_detect(struct drm_connector *connector, bool force)
1345{ 1345{
1346 struct drm_display_mode mode; 1346 struct drm_display_mode mode;
1347 struct drm_encoder *encoder = intel_attached_encoder(connector); 1347 struct drm_encoder *encoder = intel_attached_encoder(connector);
@@ -1353,7 +1353,7 @@ intel_tv_detect(struct drm_connector *connector)
1353 1353
1354 if (encoder->crtc && encoder->crtc->enabled) { 1354 if (encoder->crtc && encoder->crtc->enabled) {
1355 type = intel_tv_detect_type(intel_tv); 1355 type = intel_tv_detect_type(intel_tv);
1356 } else { 1356 } else if (force) {
1357 struct drm_crtc *crtc; 1357 struct drm_crtc *crtc;
1358 int dpms_mode; 1358 int dpms_mode;
1359 1359
@@ -1364,10 +1364,9 @@ intel_tv_detect(struct drm_connector *connector)
1364 intel_release_load_detect_pipe(&intel_tv->base, connector, 1364 intel_release_load_detect_pipe(&intel_tv->base, connector,
1365 dpms_mode); 1365 dpms_mode);
1366 } else 1366 } else
1367 type = -1; 1367 return connector_status_unknown;
1368 } 1368 } else
1369 1369 return connector->status;
1370 intel_tv->type = type;
1371 1370
1372 if (type < 0) 1371 if (type < 0)
1373 return connector_status_disconnected; 1372 return connector_status_disconnected;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a1473fff06ac..fc737037f751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -168,7 +168,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
168} 168}
169 169
170static enum drm_connector_status 170static enum drm_connector_status
171nouveau_connector_detect(struct drm_connector *connector) 171nouveau_connector_detect(struct drm_connector *connector, bool force)
172{ 172{
173 struct drm_device *dev = connector->dev; 173 struct drm_device *dev = connector->dev;
174 struct nouveau_connector *nv_connector = nouveau_connector(connector); 174 struct nouveau_connector *nv_connector = nouveau_connector(connector);
@@ -246,7 +246,7 @@ detect_analog:
246} 246}
247 247
248static enum drm_connector_status 248static enum drm_connector_status
249nouveau_connector_detect_lvds(struct drm_connector *connector) 249nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
250{ 250{
251 struct drm_device *dev = connector->dev; 251 struct drm_device *dev = connector->dev;
252 struct drm_nouveau_private *dev_priv = dev->dev_private; 252 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -267,7 +267,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector)
267 267
268 /* Try retrieving EDID via DDC */ 268 /* Try retrieving EDID via DDC */
269 if (!dev_priv->vbios.fp_no_ddc) { 269 if (!dev_priv->vbios.fp_no_ddc) {
270 status = nouveau_connector_detect(connector); 270 status = nouveau_connector_detect(connector, force);
271 if (status == connector_status_connected) 271 if (status == connector_status_connected)
272 goto out; 272 goto out;
273 } 273 }
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
558 if (nv_encoder->dcb->type == OUTPUT_LVDS && 558 if (nv_encoder->dcb->type == OUTPUT_LVDS &&
559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
561 nv_connector->native_mode = drm_mode_create(dev); 561 struct drm_display_mode mode;
562 nouveau_bios_fp_mode(dev, nv_connector->native_mode); 562
563 nouveau_bios_fp_mode(dev, &mode);
564 nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
563 } 565 }
564 566
565 /* Find the native mode if this is a digital panel, if we didn't 567 /* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index dbd30b2e43fd..d2047713dc59 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
352 352
353 if (nouveau_fb->nvbo) { 353 if (nouveau_fb->nvbo) {
354 nouveau_bo_unmap(nouveau_fb->nvbo); 354 nouveau_bo_unmap(nouveau_fb->nvbo);
355 drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
355 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 356 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
356 nouveau_fb->nvbo = NULL; 357 nouveau_fb->nvbo = NULL;
357 } 358 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ead7b8fc53fc..19620a6709f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
167 goto out; 167 goto out;
168 168
169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); 169 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
170 /* drop reference from allocate - handle holds it now */
171 drm_gem_object_unreference_unlocked(nvbo->gem);
170out: 172out:
171 drm_gem_object_handle_unreference_unlocked(nvbo->gem);
172
173 if (ret)
174 drm_gem_object_unreference_unlocked(nvbo->gem);
175 return ret; 173 return ret;
176} 174}
177 175
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3ec181ff50ce..3c9964a8fbad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
79 mutex_lock(&dev->struct_mutex); 79 mutex_lock(&dev->struct_mutex);
80 nouveau_bo_unpin(chan->notifier_bo); 80 nouveau_bo_unpin(chan->notifier_bo);
81 mutex_unlock(&dev->struct_mutex); 81 mutex_unlock(&dev->struct_mutex);
82 drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
82 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); 83 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
83 drm_mm_takedown(&chan->notifier_heap); 84 drm_mm_takedown(&chan->notifier_heap);
84} 85}
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a9..fe359a239df3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
4999#define SW_I2C_CNTL_WRITE1BIT 6 4999#define SW_I2C_CNTL_WRITE1BIT 6
5000 5000
5001//==============================VESA definition Portion=============================== 5001//==============================VESA definition Portion===============================
5002#define VESA_OEM_PRODUCT_REV '01.00' 5002#define VESA_OEM_PRODUCT_REV "01.00"
5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support 5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
5004#define VESA_MODE_WIN_ATTRIBUTE 7 5004#define VESA_MODE_WIN_ATTRIBUTE 7
5005#define VESA_WIN_SIZE 64 5005#define VESA_WIN_SIZE 64
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 464a81a1990f..cd0290f946cf 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -539,14 +539,15 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
539 pll->algo = PLL_ALGO_LEGACY; 539 pll->algo = PLL_ALGO_LEGACY;
540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 540 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
541 } 541 }
542 /* There is some evidence (often anecdotal) that RV515 LVDS 542 /* There is some evidence (often anecdotal) that RV515/RV620 LVDS
543 * (on some boards at least) prefers the legacy algo. I'm not 543 * (on some boards at least) prefers the legacy algo. I'm not
544 * sure whether this should handled generically or on a 544 * sure whether this should handled generically or on a
545 * case-by-case quirk basis. Both algos should work fine in the 545 * case-by-case quirk basis. Both algos should work fine in the
546 * majority of cases. 546 * majority of cases.
547 */ 547 */
548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && 548 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
549 (rdev->family == CHIP_RV515)) { 549 ((rdev->family == CHIP_RV515) ||
550 (rdev->family == CHIP_RV620))) {
550 /* allow the user to overrride just in case */ 551 /* allow the user to overrride just in case */
551 if (radeon_new_pll == 1) 552 if (radeon_new_pll == 1)
552 pll->algo = PLL_ALGO_NEW; 553 pll->algo = PLL_ALGO_NEW;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b8b7f010b25f..79082d4398ae 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1160,14 +1160,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1160 EVERGREEN_MAX_BACKENDS_MASK)); 1160 EVERGREEN_MAX_BACKENDS_MASK));
1161 break; 1161 break;
1162 } 1162 }
1163 } else 1163 } else {
1164 gb_backend_map = 1164 switch (rdev->family) {
1165 evergreen_get_tile_pipe_to_backend_map(rdev, 1165 case CHIP_CYPRESS:
1166 rdev->config.evergreen.max_tile_pipes, 1166 case CHIP_HEMLOCK:
1167 rdev->config.evergreen.max_backends, 1167 gb_backend_map = 0x66442200;
1168 ((EVERGREEN_MAX_BACKENDS_MASK << 1168 break;
1169 rdev->config.evergreen.max_backends) & 1169 case CHIP_JUNIPER:
1170 EVERGREEN_MAX_BACKENDS_MASK)); 1170 gb_backend_map = 0x00006420;
1171 break;
1172 default:
1173 gb_backend_map =
1174 evergreen_get_tile_pipe_to_backend_map(rdev,
1175 rdev->config.evergreen.max_tile_pipes,
1176 rdev->config.evergreen.max_backends,
1177 ((EVERGREEN_MAX_BACKENDS_MASK <<
1178 rdev->config.evergreen.max_backends) &
1179 EVERGREEN_MAX_BACKENDS_MASK));
1180 }
1181 }
1171 1182
1172 rdev->config.evergreen.tile_config = gb_addr_config; 1183 rdev->config.evergreen.tile_config = gb_addr_config;
1173 WREG32(GB_BACKEND_MAP, gb_backend_map); 1184 WREG32(GB_BACKEND_MAP, gb_backend_map);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e817a0bb5eb4..e151f16a8f86 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2020,18 +2020,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
2020 return false; 2020 return false;
2021 } 2021 }
2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies); 2022 elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
2023 if (elapsed >= 3000) { 2023 if (elapsed >= 10000) {
2024 /* very likely the improbable case where current
2025 * rptr is equal to last recorded, a while ago, rptr
2026 * this is more likely a false positive update tracking
2027 * information which should force us to be recall at
2028 * latter point
2029 */
2030 lockup->last_cp_rptr = cp->rptr;
2031 lockup->last_jiffies = jiffies;
2032 return false;
2033 }
2034 if (elapsed >= 1000) {
2035 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed); 2024 dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
2036 return true; 2025 return true;
2037 } 2026 }
@@ -3308,13 +3297,14 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3308 unsigned long size; 3297 unsigned long size;
3309 unsigned prim_walk; 3298 unsigned prim_walk;
3310 unsigned nverts; 3299 unsigned nverts;
3300 unsigned num_cb = track->num_cb;
3311 3301
3312 for (i = 0; i < track->num_cb; i++) { 3302 if (!track->zb_cb_clear && !track->color_channel_mask &&
3303 !track->blend_read_enable)
3304 num_cb = 0;
3305
3306 for (i = 0; i < num_cb; i++) {
3313 if (track->cb[i].robj == NULL) { 3307 if (track->cb[i].robj == NULL) {
3314 if (!(track->zb_cb_clear || track->color_channel_mask ||
3315 track->blend_read_enable)) {
3316 continue;
3317 }
3318 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); 3308 DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
3319 return -EINVAL; 3309 return -EINVAL;
3320 } 3310 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index afc18d87fdca..7a04959ba0ee 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
2729 if (i < rdev->usec_timeout) { 2729 if (i < rdev->usec_timeout) {
2730 DRM_INFO("ib test succeeded in %u usecs\n", i); 2730 DRM_INFO("ib test succeeded in %u usecs\n", i);
2731 } else { 2731 } else {
2732 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 2732 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2733 scratch, tmp); 2733 scratch, tmp);
2734 r = -EINVAL; 2734 r = -EINVAL;
2735 } 2735 }
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 3528 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 3529 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
3530 */ 3530 */
3531 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { 3531 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3532 rdev->vram_scratch.ptr) {
3532 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 3533 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3533 u32 tmp; 3534 u32 tmp;
3534 3535
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index d13622ae74e9..9ceb2a1ce799 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -1,3 +1,28 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
25
1#include "drmP.h" 26#include "drmP.h"
2#include "drm.h" 27#include "drm.h"
3#include "radeon_drm.h" 28#include "radeon_drm.h"
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index fdc3b378cbb0..f437d36dd98c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,27 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 * Copyright 2009 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 */
1 25
2#ifndef R600_BLIT_SHADERS_H 26#ifndef R600_BLIT_SHADERS_H
3#define R600_BLIT_SHADERS_H 27#define R600_BLIT_SHADERS_H
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index d8864949e387..250a3a918193 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1170,9 +1170,8 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1170 /* using get ib will give us the offset into the mipmap bo */ 1170 /* using get ib will give us the offset into the mipmap bo */
1171 word0 = radeon_get_ib_value(p, idx + 3) << 8; 1171 word0 = radeon_get_ib_value(p, idx + 3) << 8;
1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { 1172 if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
1173 dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1173 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); 1174 w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
1175 return -EINVAL;
1176 } 1175 }
1177 return 0; 1176 return 0;
1178} 1177}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index ebae14c4b768..68932ba7b8a4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
317 *connector_type = DRM_MODE_CONNECTOR_DVID; 317 *connector_type = DRM_MODE_CONNECTOR_DVID;
318 } 318 }
319 319
320 /* MSI K9A2GM V2/V3 board has no HDMI or DVI */
321 if ((dev->pdev->device == 0x796e) &&
322 (dev->pdev->subsystem_vendor == 0x1462) &&
323 (dev->pdev->subsystem_device == 0x7302)) {
324 if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
325 (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
326 return false;
327 }
328
320 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ 329 /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
321 if ((dev->pdev->device == 0x7941) && 330 if ((dev->pdev->device == 0x7941) &&
322 (dev->pdev->subsystem_vendor == 0x147b) && 331 (dev->pdev->subsystem_vendor == 0x147b) &&
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index bd74e428bd14..a04b7a6ad95f 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1485,6 +1485,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1485 /* PowerMac8,1 ? */ 1485 /* PowerMac8,1 ? */
1486 /* imac g5 isight */ 1486 /* imac g5 isight */
1487 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT; 1487 rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
1488 } else if ((rdev->pdev->device == 0x4a48) &&
1489 (rdev->pdev->subsystem_vendor == 0x1002) &&
1490 (rdev->pdev->subsystem_device == 0x4a48)) {
1491 /* Mac X800 */
1492 rdev->mode_info.connector_table = CT_MAC_X800;
1488 } else 1493 } else
1489#endif /* CONFIG_PPC_PMAC */ 1494#endif /* CONFIG_PPC_PMAC */
1490#ifdef CONFIG_PPC64 1495#ifdef CONFIG_PPC64
@@ -1961,6 +1966,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1961 CONNECTOR_OBJECT_ID_VGA, 1966 CONNECTOR_OBJECT_ID_VGA,
1962 &hpd); 1967 &hpd);
1963 break; 1968 break;
1969 case CT_MAC_X800:
1970 DRM_INFO("Connector Table: %d (mac x800)\n",
1971 rdev->mode_info.connector_table);
1972 /* DVI - primary dac, internal tmds */
1973 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
1974 hpd.hpd = RADEON_HPD_1; /* ??? */
1975 radeon_add_legacy_encoder(dev,
1976 radeon_get_encoder_enum(dev,
1977 ATOM_DEVICE_DFP1_SUPPORT,
1978 0),
1979 ATOM_DEVICE_DFP1_SUPPORT);
1980 radeon_add_legacy_encoder(dev,
1981 radeon_get_encoder_enum(dev,
1982 ATOM_DEVICE_CRT1_SUPPORT,
1983 1),
1984 ATOM_DEVICE_CRT1_SUPPORT);
1985 radeon_add_legacy_connector(dev, 0,
1986 ATOM_DEVICE_DFP1_SUPPORT |
1987 ATOM_DEVICE_CRT1_SUPPORT,
1988 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1989 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1990 &hpd);
1991 /* DVI - tv dac, dvo */
1992 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
1993 hpd.hpd = RADEON_HPD_2; /* ??? */
1994 radeon_add_legacy_encoder(dev,
1995 radeon_get_encoder_enum(dev,
1996 ATOM_DEVICE_DFP2_SUPPORT,
1997 0),
1998 ATOM_DEVICE_DFP2_SUPPORT);
1999 radeon_add_legacy_encoder(dev,
2000 radeon_get_encoder_enum(dev,
2001 ATOM_DEVICE_CRT2_SUPPORT,
2002 2),
2003 ATOM_DEVICE_CRT2_SUPPORT);
2004 radeon_add_legacy_connector(dev, 1,
2005 ATOM_DEVICE_DFP2_SUPPORT |
2006 ATOM_DEVICE_CRT2_SUPPORT,
2007 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2008 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
2009 &hpd);
2010 break;
1964 default: 2011 default:
1965 DRM_INFO("Connector table: %d (invalid)\n", 2012 DRM_INFO("Connector table: %d (invalid)\n",
1966 rdev->mode_info.connector_table); 2013 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index a9dd7847d96e..ecc1a8fafbfd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -481,7 +481,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
481 return MODE_OK; 481 return MODE_OK;
482} 482}
483 483
484static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connector) 484static enum drm_connector_status
485radeon_lvds_detect(struct drm_connector *connector, bool force)
485{ 486{
486 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 487 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
487 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 488 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -594,7 +595,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector,
594 return MODE_OK; 595 return MODE_OK;
595} 596}
596 597
597static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector) 598static enum drm_connector_status
599radeon_vga_detect(struct drm_connector *connector, bool force)
598{ 600{
599 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 601 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
600 struct drm_encoder *encoder; 602 struct drm_encoder *encoder;
@@ -691,7 +693,8 @@ static int radeon_tv_mode_valid(struct drm_connector *connector,
691 return MODE_OK; 693 return MODE_OK;
692} 694}
693 695
694static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) 696static enum drm_connector_status
697radeon_tv_detect(struct drm_connector *connector, bool force)
695{ 698{
696 struct drm_encoder *encoder; 699 struct drm_encoder *encoder;
697 struct drm_encoder_helper_funcs *encoder_funcs; 700 struct drm_encoder_helper_funcs *encoder_funcs;
@@ -748,7 +751,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
748 * we have to check if this analog encoder is shared with anyone else (TV) 751 * we have to check if this analog encoder is shared with anyone else (TV)
749 * if its shared we have to set the other connector to disconnected. 752 * if its shared we have to set the other connector to disconnected.
750 */ 753 */
751static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) 754static enum drm_connector_status
755radeon_dvi_detect(struct drm_connector *connector, bool force)
752{ 756{
753 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 757 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
754 struct drm_encoder *encoder = NULL; 758 struct drm_encoder *encoder = NULL;
@@ -972,7 +976,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
972 return ret; 976 return ret;
973} 977}
974 978
975static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) 979static enum drm_connector_status
980radeon_dp_detect(struct drm_connector *connector, bool force)
976{ 981{
977 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 982 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
978 enum drm_connector_status ret = connector_status_disconnected; 983 enum drm_connector_status ret = connector_status_disconnected;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6dd434ad2429..b92d2f2fcbed 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); 349 DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
350 if (devices & ATOM_DEVICE_DFP5_SUPPORT) 350 if (devices & ATOM_DEVICE_DFP5_SUPPORT)
351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); 351 DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
352 if (devices & ATOM_DEVICE_DFP6_SUPPORT)
353 DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
352 if (devices & ATOM_DEVICE_TV1_SUPPORT) 354 if (devices & ATOM_DEVICE_TV1_SUPPORT)
353 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); 355 DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
354 if (devices & ATOM_DEVICE_CV_SUPPORT) 356 if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
841{ 843{
842 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 844 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
843 845
844 if (radeon_fb->obj) 846 if (radeon_fb->obj) {
845 drm_gem_object_unreference_unlocked(radeon_fb->obj); 847 drm_gem_object_unreference_unlocked(radeon_fb->obj);
848 }
846 drm_framebuffer_cleanup(fb); 849 drm_framebuffer_cleanup(fb);
847 kfree(radeon_fb); 850 kfree(radeon_fb);
848} 851}
@@ -1140,17 +1143,18 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1140 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 1143 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
1141 else 1144 else
1142 radeon_crtc->rmx_type = RMX_OFF; 1145 radeon_crtc->rmx_type = RMX_OFF;
1143 src_v = crtc->mode.vdisplay;
1144 dst_v = radeon_crtc->native_mode.vdisplay;
1145 src_h = crtc->mode.hdisplay;
1146 dst_h = radeon_crtc->native_mode.vdisplay;
1147 /* copy native mode */ 1146 /* copy native mode */
1148 memcpy(&radeon_crtc->native_mode, 1147 memcpy(&radeon_crtc->native_mode,
1149 &radeon_encoder->native_mode, 1148 &radeon_encoder->native_mode,
1150 sizeof(struct drm_display_mode)); 1149 sizeof(struct drm_display_mode));
1150 src_v = crtc->mode.vdisplay;
1151 dst_v = radeon_crtc->native_mode.vdisplay;
1152 src_h = crtc->mode.hdisplay;
1153 dst_h = radeon_crtc->native_mode.hdisplay;
1151 1154
1152 /* fix up for overscan on hdmi */ 1155 /* fix up for overscan on hdmi */
1153 if (ASIC_IS_AVIVO(rdev) && 1156 if (ASIC_IS_AVIVO(rdev) &&
1157 (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
1154 ((radeon_encoder->underscan_type == UNDERSCAN_ON) || 1158 ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
1155 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && 1159 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
1156 drm_detect_hdmi_monitor(radeon_connector->edid) && 1160 drm_detect_hdmi_monitor(radeon_connector->edid) &&
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index c74a8b20d941..9cdf6a35bc2c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
94 ret = radeon_bo_reserve(rbo, false); 94 ret = radeon_bo_reserve(rbo, false);
95 if (likely(ret == 0)) { 95 if (likely(ret == 0)) {
96 radeon_bo_kunmap(rbo); 96 radeon_bo_kunmap(rbo);
97 radeon_bo_unpin(rbo);
97 radeon_bo_unreserve(rbo); 98 radeon_bo_unreserve(rbo);
98 } 99 }
100 drm_gem_object_handle_unreference(gobj);
99 drm_gem_object_unreference_unlocked(gobj); 101 drm_gem_object_unreference_unlocked(gobj);
100} 102}
101 103
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
325{ 327{
326 struct fb_info *info; 328 struct fb_info *info;
327 struct radeon_framebuffer *rfb = &rfbdev->rfb; 329 struct radeon_framebuffer *rfb = &rfbdev->rfb;
328 struct radeon_bo *rbo;
329 int r;
330 330
331 if (rfbdev->helper.fbdev) { 331 if (rfbdev->helper.fbdev) {
332 info = rfbdev->helper.fbdev; 332 info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
338 } 338 }
339 339
340 if (rfb->obj) { 340 if (rfb->obj) {
341 rbo = rfb->obj->driver_private; 341 radeonfb_destroy_pinned_object(rfb->obj);
342 r = radeon_bo_reserve(rbo, false); 342 rfb->obj = NULL;
343 if (likely(r == 0)) {
344 radeon_bo_kunmap(rbo);
345 radeon_bo_unpin(rbo);
346 radeon_bo_unreserve(rbo);
347 }
348 drm_gem_object_unreference_unlocked(rfb->obj);
349 } 343 }
350 drm_fb_helper_fini(&rfbdev->helper); 344 drm_fb_helper_fini(&rfbdev->helper);
351 drm_framebuffer_cleanup(&rfb->base); 345 drm_framebuffer_cleanup(&rfb->base);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c578f265b24c..d1e595d91723 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
201 return r; 201 return r;
202 } 202 }
203 r = drm_gem_handle_create(filp, gobj, &handle); 203 r = drm_gem_handle_create(filp, gobj, &handle);
204 /* drop reference from allocate - handle holds it now */
205 drm_gem_object_unreference_unlocked(gobj);
204 if (r) { 206 if (r) {
205 drm_gem_object_unreference_unlocked(gobj);
206 return r; 207 return r;
207 } 208 }
208 drm_gem_object_handle_unreference_unlocked(gobj);
209 args->handle = handle; 209 args->handle = handle;
210 return 0; 210 return 0;
211} 211}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c41d124..8fbbe1c6ebbd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
203 */ 203 */
204int radeon_driver_firstopen_kms(struct drm_device *dev) 204int radeon_driver_firstopen_kms(struct drm_device *dev)
205{ 205{
206 struct radeon_device *rdev = dev->dev_private;
207
208 if (rdev->powered_down)
209 return -EINVAL;
206 return 0; 210 return 0;
207} 211}
208 212
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index efbe975312dc..17a6602b5885 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -204,7 +204,7 @@ struct radeon_i2c_chan {
204 204
205/* mostly for macs, but really any system without connector tables */ 205/* mostly for macs, but really any system without connector tables */
206enum radeon_connector_table { 206enum radeon_connector_table {
207 CT_NONE, 207 CT_NONE = 0,
208 CT_GENERIC, 208 CT_GENERIC,
209 CT_IBOOK, 209 CT_IBOOK,
210 CT_POWERBOOK_EXTERNAL, 210 CT_POWERBOOK_EXTERNAL,
@@ -215,6 +215,7 @@ enum radeon_connector_table {
215 CT_IMAC_G5_ISIGHT, 215 CT_IMAC_G5_ISIGHT,
216 CT_EMAC, 216 CT_EMAC,
217 CT_RN50_POWER, 217 CT_RN50_POWER,
218 CT_MAC_X800,
218}; 219};
219 220
220enum radeon_dvo_chip { 221enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e04232..3451a82adba7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
351 INIT_LIST_HEAD(&fbo->lru); 351 INIT_LIST_HEAD(&fbo->lru);
352 INIT_LIST_HEAD(&fbo->swap); 352 INIT_LIST_HEAD(&fbo->swap);
353 fbo->vm_node = NULL; 353 fbo->vm_node = NULL;
354 atomic_set(&fbo->cpu_writers, 0);
354 355
355 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 356 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
356 kref_init(&fbo->list_kref); 357 kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f018..b1e02fffd3cc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
69 spinlock_t lock; 69 spinlock_t lock;
70 bool fill_lock; 70 bool fill_lock;
71 struct list_head list; 71 struct list_head list;
72 int gfp_flags; 72 gfp_t gfp_flags;
73 unsigned npages; 73 unsigned npages;
74 char *name; 74 char *name;
75 unsigned long nfrees; 75 unsigned long nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
475 * This function is reentrant if caller updates count depending on number of 475 * This function is reentrant if caller updates count depending on number of
476 * pages returned in pages array. 476 * pages returned in pages array.
477 */ 477 */
478static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, 478static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
479 int ttm_flags, enum ttm_caching_state cstate, unsigned count) 479 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
480{ 480{
481 struct page **caching_array; 481 struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
666{ 666{
667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
668 struct page *p = NULL; 668 struct page *p = NULL;
669 int gfp_flags = GFP_USER; 669 gfp_t gfp_flags = GFP_USER;
670 int r; 670 int r;
671 671
672 /* set zero flag for page allocation if required */ 672 /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 return 0; 818 return 0;
819} 819}
820 820
821void ttm_page_alloc_fini() 821void ttm_page_alloc_fini(void)
822{ 822{
823 int i; 823 int i;
824 824
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 72ec2e2b6e97..a96ed6d9d010 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
148 {0, 0, 0} 148 {0, 0, 0}
149}; 149};
150 150
151static char *vmw_devname = "vmwgfx"; 151static int enable_fbdev;
152 152
153static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 153static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
154static void vmw_master_init(struct vmw_master *); 154static void vmw_master_init(struct vmw_master *);
155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 155static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
156 void *ptr); 156 void *ptr);
157 157
158MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
159module_param_named(enable_fbdev, enable_fbdev, int, 0600);
160
158static void vmw_print_capabilities(uint32_t capabilities) 161static void vmw_print_capabilities(uint32_t capabilities)
159{ 162{
160 DRM_INFO("Capabilities:\n"); 163 DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
192{ 195{
193 int ret; 196 int ret;
194 197
195 vmw_kms_save_vga(dev_priv);
196
197 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 198 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
198 if (unlikely(ret != 0)) { 199 if (unlikely(ret != 0)) {
199 DRM_ERROR("Unable to initialize FIFO.\n"); 200 DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
206static void vmw_release_device(struct vmw_private *dev_priv) 207static void vmw_release_device(struct vmw_private *dev_priv)
207{ 208{
208 vmw_fifo_release(dev_priv, &dev_priv->fifo); 209 vmw_fifo_release(dev_priv, &dev_priv->fifo);
209 vmw_kms_restore_vga(dev_priv);
210} 210}
211 211
212int vmw_3d_resource_inc(struct vmw_private *dev_priv)
213{
214 int ret = 0;
215
216 mutex_lock(&dev_priv->release_mutex);
217 if (unlikely(dev_priv->num_3d_resources++ == 0)) {
218 ret = vmw_request_device(dev_priv);
219 if (unlikely(ret != 0))
220 --dev_priv->num_3d_resources;
221 }
222 mutex_unlock(&dev_priv->release_mutex);
223 return ret;
224}
225
226
227void vmw_3d_resource_dec(struct vmw_private *dev_priv)
228{
229 int32_t n3d;
230
231 mutex_lock(&dev_priv->release_mutex);
232 if (unlikely(--dev_priv->num_3d_resources == 0))
233 vmw_release_device(dev_priv);
234 n3d = (int32_t) dev_priv->num_3d_resources;
235 mutex_unlock(&dev_priv->release_mutex);
236
237 BUG_ON(n3d < 0);
238}
212 239
213static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 240static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
214{ 241{
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
228 dev_priv->last_read_sequence = (uint32_t) -100; 255 dev_priv->last_read_sequence = (uint32_t) -100;
229 mutex_init(&dev_priv->hw_mutex); 256 mutex_init(&dev_priv->hw_mutex);
230 mutex_init(&dev_priv->cmdbuf_mutex); 257 mutex_init(&dev_priv->cmdbuf_mutex);
258 mutex_init(&dev_priv->release_mutex);
231 rwlock_init(&dev_priv->resource_lock); 259 rwlock_init(&dev_priv->resource_lock);
232 idr_init(&dev_priv->context_idr); 260 idr_init(&dev_priv->context_idr);
233 idr_init(&dev_priv->surface_idr); 261 idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
244 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 272 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
245 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 273 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
246 274
275 dev_priv->enable_fb = enable_fbdev;
276
247 mutex_lock(&dev_priv->hw_mutex); 277 mutex_lock(&dev_priv->hw_mutex);
248 278
249 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 279 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
343 373
344 dev->dev_private = dev_priv; 374 dev->dev_private = dev_priv;
345 375
346 if (!dev->devname)
347 dev->devname = vmw_devname;
348
349 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
350 ret = drm_irq_install(dev);
351 if (unlikely(ret != 0)) {
352 DRM_ERROR("Failed installing irq: %d\n", ret);
353 goto out_no_irq;
354 }
355 }
356
357 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 376 ret = pci_request_regions(dev->pdev, "vmwgfx probe");
358 dev_priv->stealth = (ret != 0); 377 dev_priv->stealth = (ret != 0);
359 if (dev_priv->stealth) { 378 if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
369 goto out_no_device; 388 goto out_no_device;
370 } 389 }
371 } 390 }
372 ret = vmw_request_device(dev_priv); 391 ret = vmw_kms_init(dev_priv);
373 if (unlikely(ret != 0)) 392 if (unlikely(ret != 0))
374 goto out_no_device; 393 goto out_no_kms;
375 vmw_kms_init(dev_priv);
376 vmw_overlay_init(dev_priv); 394 vmw_overlay_init(dev_priv);
377 vmw_fb_init(dev_priv); 395 if (dev_priv->enable_fb) {
396 ret = vmw_3d_resource_inc(dev_priv);
397 if (unlikely(ret != 0))
398 goto out_no_fifo;
399 vmw_kms_save_vga(dev_priv);
400 vmw_fb_init(dev_priv);
401 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
402 "Detected device 3D availability.\n" :
403 "Detected no device 3D availability.\n");
404 } else {
405 DRM_INFO("Delayed 3D detection since we're not "
406 "running the device in SVGA mode yet.\n");
407 }
408
409 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
410 ret = drm_irq_install(dev);
411 if (unlikely(ret != 0)) {
412 DRM_ERROR("Failed installing irq: %d\n", ret);
413 goto out_no_irq;
414 }
415 }
378 416
379 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 417 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
380 register_pm_notifier(&dev_priv->pm_nb); 418 register_pm_notifier(&dev_priv->pm_nb);
381 419
382 DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
383
384 return 0; 420 return 0;
385 421
386out_no_device:
387 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
388 drm_irq_uninstall(dev_priv->dev);
389 if (dev->devname == vmw_devname)
390 dev->devname = NULL;
391out_no_irq: 422out_no_irq:
423 if (dev_priv->enable_fb) {
424 vmw_fb_close(dev_priv);
425 vmw_kms_restore_vga(dev_priv);
426 vmw_3d_resource_dec(dev_priv);
427 }
428out_no_fifo:
429 vmw_overlay_close(dev_priv);
430 vmw_kms_close(dev_priv);
431out_no_kms:
432 if (dev_priv->stealth)
433 pci_release_region(dev->pdev, 2);
434 else
435 pci_release_regions(dev->pdev);
436out_no_device:
392 ttm_object_device_release(&dev_priv->tdev); 437 ttm_object_device_release(&dev_priv->tdev);
393out_err4: 438out_err4:
394 iounmap(dev_priv->mmio_virt); 439 iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
415 460
416 unregister_pm_notifier(&dev_priv->pm_nb); 461 unregister_pm_notifier(&dev_priv->pm_nb);
417 462
418 vmw_fb_close(dev_priv); 463 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
464 drm_irq_uninstall(dev_priv->dev);
465 if (dev_priv->enable_fb) {
466 vmw_fb_close(dev_priv);
467 vmw_kms_restore_vga(dev_priv);
468 vmw_3d_resource_dec(dev_priv);
469 }
419 vmw_kms_close(dev_priv); 470 vmw_kms_close(dev_priv);
420 vmw_overlay_close(dev_priv); 471 vmw_overlay_close(dev_priv);
421 vmw_release_device(dev_priv);
422 if (dev_priv->stealth) 472 if (dev_priv->stealth)
423 pci_release_region(dev->pdev, 2); 473 pci_release_region(dev->pdev, 2);
424 else 474 else
425 pci_release_regions(dev->pdev); 475 pci_release_regions(dev->pdev);
426 476
427 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
428 drm_irq_uninstall(dev_priv->dev);
429 if (dev->devname == vmw_devname)
430 dev->devname = NULL;
431 ttm_object_device_release(&dev_priv->tdev); 477 ttm_object_device_release(&dev_priv->tdev);
432 iounmap(dev_priv->mmio_virt); 478 iounmap(dev_priv->mmio_virt);
433 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, 479 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
500 struct drm_ioctl_desc *ioctl = 546 struct drm_ioctl_desc *ioctl =
501 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 547 &vmw_ioctls[nr - DRM_COMMAND_BASE];
502 548
503 if (unlikely(ioctl->cmd != cmd)) { 549 if (unlikely(ioctl->cmd_drv != cmd)) {
504 DRM_ERROR("Invalid command format, ioctl %d\n", 550 DRM_ERROR("Invalid command format, ioctl %d\n",
505 nr - DRM_COMMAND_BASE); 551 nr - DRM_COMMAND_BASE);
506 return -EINVAL; 552 return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
589 struct vmw_master *vmaster = vmw_master(file_priv->master); 635 struct vmw_master *vmaster = vmw_master(file_priv->master);
590 int ret = 0; 636 int ret = 0;
591 637
638 if (!dev_priv->enable_fb) {
639 ret = vmw_3d_resource_inc(dev_priv);
640 if (unlikely(ret != 0))
641 return ret;
642 vmw_kms_save_vga(dev_priv);
643 mutex_lock(&dev_priv->hw_mutex);
644 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
645 mutex_unlock(&dev_priv->hw_mutex);
646 }
647
592 if (active) { 648 if (active) {
593 BUG_ON(active != &dev_priv->fbdev_master); 649 BUG_ON(active != &dev_priv->fbdev_master);
594 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 650 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
617 return 0; 673 return 0;
618 674
619out_no_active_lock: 675out_no_active_lock:
620 vmw_release_device(dev_priv); 676 if (!dev_priv->enable_fb) {
677 mutex_lock(&dev_priv->hw_mutex);
678 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
679 mutex_unlock(&dev_priv->hw_mutex);
680 vmw_kms_restore_vga(dev_priv);
681 vmw_3d_resource_dec(dev_priv);
682 }
621 return ret; 683 return ret;
622} 684}
623 685
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
645 707
646 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 708 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
647 709
710 if (!dev_priv->enable_fb) {
711 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
712 if (unlikely(ret != 0))
713 DRM_ERROR("Unable to clean VRAM on master drop.\n");
714 mutex_lock(&dev_priv->hw_mutex);
715 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
716 mutex_unlock(&dev_priv->hw_mutex);
717 vmw_kms_restore_vga(dev_priv);
718 vmw_3d_resource_dec(dev_priv);
719 }
720
648 dev_priv->active_master = &dev_priv->fbdev_master; 721 dev_priv->active_master = &dev_priv->fbdev_master;
649 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 722 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
650 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 723 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
651 724
652 vmw_fb_on(dev_priv); 725 if (dev_priv->enable_fb)
726 vmw_fb_on(dev_priv);
653} 727}
654 728
655 729
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
722 .irq_postinstall = vmw_irq_postinstall, 796 .irq_postinstall = vmw_irq_postinstall,
723 .irq_uninstall = vmw_irq_uninstall, 797 .irq_uninstall = vmw_irq_uninstall,
724 .irq_handler = vmw_irq_handler, 798 .irq_handler = vmw_irq_handler,
799 .get_vblank_counter = vmw_get_vblank_counter,
725 .reclaim_buffers_locked = NULL, 800 .reclaim_buffers_locked = NULL,
726 .get_map_ofs = drm_core_get_map_ofs, 801 .get_map_ofs = drm_core_get_map_ofs,
727 .get_reg_ofs = drm_core_get_reg_ofs, 802 .get_reg_ofs = drm_core_get_reg_ofs,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 429f917b60bf..58de6393f611 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -277,6 +277,7 @@ struct vmw_private {
277 277
278 bool stealth; 278 bool stealth;
279 bool is_opened; 279 bool is_opened;
280 bool enable_fb;
280 281
281 /** 282 /**
282 * Master management. 283 * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
285 struct vmw_master *active_master; 286 struct vmw_master *active_master;
286 struct vmw_master fbdev_master; 287 struct vmw_master fbdev_master;
287 struct notifier_block pm_nb; 288 struct notifier_block pm_nb;
289
290 struct mutex release_mutex;
291 uint32_t num_3d_resources;
288}; 292};
289 293
290static inline struct vmw_private *vmw_priv(struct drm_device *dev) 294static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
319 return val; 323 return val;
320} 324}
321 325
326int vmw_3d_resource_inc(struct vmw_private *dev_priv);
327void vmw_3d_resource_dec(struct vmw_private *dev_priv);
328
322/** 329/**
323 * GMR utilities - vmwgfx_gmr.c 330 * GMR utilities - vmwgfx_gmr.c
324 */ 331 */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
511 unsigned bbp, unsigned depth); 518 unsigned bbp, unsigned depth);
512int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 519int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file_priv); 520 struct drm_file *file_priv);
521u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
514 522
515/** 523/**
516 * Overlay control - vmwgfx_overlay.c 524 * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 870967a97c15..409e172f4abf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
615 if (unlikely(ret != 0)) 615 if (unlikely(ret != 0))
616 goto err_unlock; 616 goto err_unlock;
617 617
618 if (bo->mem.mem_type == TTM_PL_VRAM &&
619 bo->mem.mm_node->start < bo->num_pages)
620 (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
621 false, false);
622
618 ret = ttm_bo_validate(bo, &ne_placement, false, false, false); 623 ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
619 624
620 /* Could probably bug on */ 625 /* Could probably bug on */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e6a1eb7ea954..0fe31766e4cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
106 mutex_lock(&dev_priv->hw_mutex); 106 mutex_lock(&dev_priv->hw_mutex);
107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 107 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 108 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
109 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
109 vmw_write(dev_priv, SVGA_REG_ENABLE, 1); 110 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
110 111
111 min = 4; 112 min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
175 dev_priv->config_done_state); 176 dev_priv->config_done_state);
176 vmw_write(dev_priv, SVGA_REG_ENABLE, 177 vmw_write(dev_priv, SVGA_REG_ENABLE,
177 dev_priv->enable_state); 178 dev_priv->enable_state);
179 vmw_write(dev_priv, SVGA_REG_TRACES,
180 dev_priv->traces_state);
178 181
179 mutex_unlock(&dev_priv->hw_mutex); 182 mutex_unlock(&dev_priv->hw_mutex);
180 vmw_fence_queue_takedown(&fifo->fence_queue); 183 vmw_fence_queue_takedown(&fifo->fence_queue);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 64d7f47df868..e882ba099f0c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 898 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 899 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 900 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
901 if (i == 0 && vmw_priv->num_displays == 1 &&
902 save->width == 0 && save->height == 0) {
903
904 /*
905 * It should be fairly safe to assume that these
906 * values are uninitialized.
907 */
908
909 save->width = vmw_priv->vga_width - save->pos_x;
910 save->height = vmw_priv->vga_height - save->pos_y;
911 }
901 } 912 }
913
902 return 0; 914 return 0;
903} 915}
904 916
@@ -984,3 +996,8 @@ out_unlock:
984 ttm_read_unlock(&vmaster->lock); 996 ttm_read_unlock(&vmaster->lock);
985 return ret; 997 return ret;
986} 998}
999
1000u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
1001{
1002 return 0;
1003}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 2ff5cf78235f..11cb39e3accb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -27,6 +27,8 @@
27 27
28#include "vmwgfx_kms.h" 28#include "vmwgfx_kms.h"
29 29
30#define VMWGFX_LDU_NUM_DU 8
31
30#define vmw_crtc_to_ldu(x) \ 32#define vmw_crtc_to_ldu(x) \
31 container_of(x, struct vmw_legacy_display_unit, base.crtc) 33 container_of(x, struct vmw_legacy_display_unit, base.crtc)
32#define vmw_encoder_to_ldu(x) \ 34#define vmw_encoder_to_ldu(x) \
@@ -335,7 +337,8 @@ static void vmw_ldu_connector_restore(struct drm_connector *connector)
335} 337}
336 338
337static enum drm_connector_status 339static enum drm_connector_status
338 vmw_ldu_connector_detect(struct drm_connector *connector) 340 vmw_ldu_connector_detect(struct drm_connector *connector,
341 bool force)
339{ 342{
340 if (vmw_connector_to_ldu(connector)->pref_active) 343 if (vmw_connector_to_ldu(connector)->pref_active)
341 return connector_status_connected; 344 return connector_status_connected;
@@ -516,7 +519,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
516 519
517 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 520 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
518 DRM_MODE_CONNECTOR_LVDS); 521 DRM_MODE_CONNECTOR_LVDS);
519 connector->status = vmw_ldu_connector_detect(connector); 522 connector->status = vmw_ldu_connector_detect(connector, true);
520 523
521 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, 524 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
522 DRM_MODE_ENCODER_LVDS); 525 DRM_MODE_ENCODER_LVDS);
@@ -535,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
535 538
536int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) 539int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
537{ 540{
541 struct drm_device *dev = dev_priv->dev;
542 int i;
543 int ret;
544
538 if (dev_priv->ldu_priv) { 545 if (dev_priv->ldu_priv) {
539 DRM_INFO("ldu system already on\n"); 546 DRM_INFO("ldu system already on\n");
540 return -EINVAL; 547 return -EINVAL;
@@ -552,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
552 559
553 drm_mode_create_dirty_info_property(dev_priv->dev); 560 drm_mode_create_dirty_info_property(dev_priv->dev);
554 561
555 vmw_ldu_init(dev_priv, 0);
556 /* for old hardware without multimon only enable one display */
557 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { 562 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
558 vmw_ldu_init(dev_priv, 1); 563 for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
559 vmw_ldu_init(dev_priv, 2); 564 vmw_ldu_init(dev_priv, i);
560 vmw_ldu_init(dev_priv, 3); 565 ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
561 vmw_ldu_init(dev_priv, 4); 566 } else {
562 vmw_ldu_init(dev_priv, 5); 567 /* for old hardware without multimon only enable one display */
563 vmw_ldu_init(dev_priv, 6); 568 vmw_ldu_init(dev_priv, 0);
564 vmw_ldu_init(dev_priv, 7); 569 ret = drm_vblank_init(dev, 1);
565 } 570 }
566 571
567 return 0; 572 return ret;
568} 573}
569 574
570int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) 575int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
571{ 576{
577 struct drm_device *dev = dev_priv->dev;
578
579 drm_vblank_cleanup(dev);
572 if (!dev_priv->ldu_priv) 580 if (!dev_priv->ldu_priv)
573 return -ENOSYS; 581 return -ENOSYS;
574 582
@@ -610,7 +618,7 @@ int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
610 ldu->pref_height = 600; 618 ldu->pref_height = 600;
611 ldu->pref_active = false; 619 ldu->pref_active = false;
612 } 620 }
613 con->status = vmw_ldu_connector_detect(con); 621 con->status = vmw_ldu_connector_detect(con, true);
614 } 622 }
615 623
616 mutex_unlock(&dev->mode_config.mutex); 624 mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 5f2d5df01e5c..c8c40e9979db 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
211 cmd->body.cid = cpu_to_le32(res->id); 211 cmd->body.cid = cpu_to_le32(res->id);
212 212
213 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214 vmw_3d_resource_dec(dev_priv);
214} 215}
215 216
216static int vmw_context_init(struct vmw_private *dev_priv, 217static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
247 cmd->body.cid = cpu_to_le32(res->id); 248 cmd->body.cid = cpu_to_le32(res->id);
248 249
249 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 250 vmw_fifo_commit(dev_priv, sizeof(*cmd));
251 (void) vmw_3d_resource_inc(dev_priv);
250 vmw_resource_activate(res, vmw_hw_context_destroy); 252 vmw_resource_activate(res, vmw_hw_context_destroy);
251 return 0; 253 return 0;
252} 254}
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
406 cmd->body.sid = cpu_to_le32(res->id); 408 cmd->body.sid = cpu_to_le32(res->id);
407 409
408 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 410 vmw_fifo_commit(dev_priv, sizeof(*cmd));
411 vmw_3d_resource_dec(dev_priv);
409} 412}
410 413
411void vmw_surface_res_free(struct vmw_resource *res) 414void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
473 } 476 }
474 477
475 vmw_fifo_commit(dev_priv, submit_size); 478 vmw_fifo_commit(dev_priv, submit_size);
479 (void) vmw_3d_resource_inc(dev_priv);
476 vmw_resource_activate(res, vmw_hw_surface_destroy); 480 vmw_resource_activate(res, vmw_hw_surface_destroy);
477 return 0; 481 return 0;
478} 482}
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b16..f366f968155a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
599} 599}
600 600
601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
602{ 602{
603 struct vga_device *vgadev; 603 struct vga_device *vgadev;
604 unsigned long flags; 604 unsigned long flags;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c52899be964..3f7292486024 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1285,8 +1285,11 @@ static const struct hid_device_id hid_blacklist[] = {
1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1285 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1286 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1287 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
1288 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1290 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
1289 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 1291 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
1290 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, 1293 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
1291 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, 1294 { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
1292 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1295 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1578,7 +1581,6 @@ static const struct hid_device_id hid_ignore_list[] = {
1578 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, 1581 { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
1579 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, 1582 { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
1580 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, 1583 { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
1581 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT)},
1582 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, 1584 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
1583 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, 1585 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
1584 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, 1586 { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85c6d13c9ffa..765a4f53eb5c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -105,6 +105,7 @@
105 105
106#define USB_VENDOR_ID_ASUS 0x0486 106#define USB_VENDOR_ID_ASUS 0x0486
107#define USB_DEVICE_ID_ASUS_T91MT 0x0185 107#define USB_DEVICE_ID_ASUS_T91MT 0x0185
108#define USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO 0x0186
108 109
109#define USB_VENDOR_ID_ASUSTEK 0x0b05 110#define USB_VENDOR_ID_ASUSTEK 0x0b05
110#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 111#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
@@ -128,6 +129,7 @@
128 129
129#define USB_VENDOR_ID_BTC 0x046e 130#define USB_VENDOR_ID_BTC 0x046e
130#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578 131#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
132#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
131 133
132#define USB_VENDOR_ID_CANDO 0x2087 134#define USB_VENDOR_ID_CANDO 0x2087
133#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 135#define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01
@@ -149,6 +151,7 @@
149 151
150#define USB_VENDOR_ID_CHICONY 0x04f2 152#define USB_VENDOR_ID_CHICONY 0x04f2
151#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 153#define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418
154#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
152 155
153#define USB_VENDOR_ID_CIDC 0x1677 156#define USB_VENDOR_ID_CIDC 0x1677
154 157
@@ -507,6 +510,7 @@
507#define USB_VENDOR_ID_UCLOGIC 0x5543 510#define USB_VENDOR_ID_UCLOGIC 0x5543
508#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 511#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
509#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 512#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
513#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
510 514
511#define USB_VENDOR_ID_VERNIER 0x08f7 515#define USB_VENDOR_ID_VERNIER 0x08f7
512#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 516#define USB_DEVICE_ID_VERNIER_LABPRO 0x0001
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index e91437c18906..ac5421d568f1 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -239,6 +239,7 @@ static void mosart_remove(struct hid_device *hdev)
239 239
240static const struct hid_device_id mosart_devices[] = { 240static const struct hid_device_id mosart_devices[] = {
241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) }, 241 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
242 { HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
242 { } 243 { }
243}; 244};
244MODULE_DEVICE_TABLE(hid, mosart_devices); 245MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-topseed.c b/drivers/hid/hid-topseed.c
index 5771f851f856..956ed9ac19d4 100644
--- a/drivers/hid/hid-topseed.c
+++ b/drivers/hid/hid-topseed.c
@@ -64,6 +64,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi,
64static const struct hid_device_id ts_devices[] = { 64static const struct hid_device_id ts_devices[] = {
65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 65 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 66 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
67 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 68 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
68 { } 69 { }
69}; 70};
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b729c0286679..599041a7f670 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
828 } 828 }
829 } else { 829 } else {
830 int skipped_report_id = 0; 830 int skipped_report_id = 0;
831 int report_id = buf[0];
831 if (buf[0] == 0x0) { 832 if (buf[0] == 0x0) {
832 /* Don't send the Report ID */ 833 /* Don't send the Report ID */
833 buf++; 834 buf++;
@@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
837 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 838 ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
838 HID_REQ_SET_REPORT, 839 HID_REQ_SET_REPORT,
839 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 840 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
840 ((report_type + 1) << 8) | *buf, 841 ((report_type + 1) << 8) | report_id,
841 interface->desc.bInterfaceNumber, buf, count, 842 interface->desc.bInterfaceNumber, buf, count,
842 USB_CTRL_SET_TIMEOUT); 843 USB_CTRL_SET_TIMEOUT);
843 /* count also the report id, if this was a numbered report. */ 844 /* count also the report id, if this was a numbered report. */
@@ -1445,6 +1446,11 @@ static const struct hid_device_id hid_usb_table[] = {
1445 { } 1446 { }
1446}; 1447};
1447 1448
1449struct usb_interface *usbhid_find_interface(int minor)
1450{
1451 return usb_find_interface(&hid_driver, minor);
1452}
1453
1448static struct hid_driver hid_usb_driver = { 1454static struct hid_driver hid_usb_driver = {
1449 .name = "generic-usb", 1455 .name = "generic-usb",
1450 .id_table = hid_usb_table, 1456 .id_table = hid_usb_table,
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 2643d3147621..70da3181c8a0 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -33,6 +33,7 @@ static const struct hid_blacklist {
33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, 33 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, 34 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, 35 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
36 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, 37 { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT },
37 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, 38 { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
38 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 39 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -69,6 +70,7 @@ static const struct hid_blacklist {
69 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 70 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
70 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
71 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 72 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
73 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
72 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 74 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
73 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 75 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
74 76
@@ -77,6 +79,8 @@ static const struct hid_blacklist {
77 79
78 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE }, 80 { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
79 81
82 { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
83
80 { 0, 0 } 84 { 0, 0 }
81}; 85};
82 86
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 0a29c51114aa..681e620eb95b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -270,7 +270,7 @@ static int hiddev_open(struct inode *inode, struct file *file)
270 struct hiddev *hiddev; 270 struct hiddev *hiddev;
271 int res; 271 int res;
272 272
273 intf = usb_find_interface(&hiddev_driver, iminor(inode)); 273 intf = usbhid_find_interface(iminor(inode));
274 if (!intf) 274 if (!intf)
275 return -ENODEV; 275 return -ENODEV;
276 hid = usb_get_intfdata(intf); 276 hid = usb_get_intfdata(intf);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 693fd3e720df..89d2e847dcc6 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -42,6 +42,7 @@ void usbhid_submit_report
42(struct hid_device *hid, struct hid_report *report, unsigned char dir); 42(struct hid_device *hid, struct hid_report *report, unsigned char dir);
43int usbhid_get_power(struct hid_device *hid); 43int usbhid_get_power(struct hid_device *hid);
44void usbhid_put_power(struct hid_device *hid); 44void usbhid_put_power(struct hid_device *hid);
45struct usb_interface *usbhid_find_interface(int minor);
45 46
46/* iofl flags */ 47/* iofl flags */
47#define HID_CTRL_RUNNING 1 48#define HID_CTRL_RUNNING 1
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09bdec0a..97499d00615a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
409 409
410config SENSORS_PKGTEMP 410config SENSORS_PKGTEMP
411 tristate "Intel processor package temperature sensor" 411 tristate "Intel processor package temperature sensor"
412 depends on X86 && PCI && EXPERIMENTAL 412 depends on X86 && EXPERIMENTAL
413 help 413 help
414 If you say yes here you get support for the package level temperature 414 If you say yes here you get support for the package level temperature
415 sensor inside your CPU. Check documentation/driver for details. 415 sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 15c1a9616af3..0683e6be662c 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -79,7 +79,7 @@ struct adm1031_data {
79 int chip_type; 79 int chip_type;
80 char valid; /* !=0 if following fields are valid */ 80 char valid; /* !=0 if following fields are valid */
81 unsigned long last_updated; /* In jiffies */ 81 unsigned long last_updated; /* In jiffies */
82 unsigned int update_rate; /* In milliseconds */ 82 unsigned int update_interval; /* In milliseconds */
83 /* The chan_select_table contains the possible configurations for 83 /* The chan_select_table contains the possible configurations for
84 * auto fan control. 84 * auto fan control.
85 */ 85 */
@@ -743,23 +743,23 @@ static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13); 743static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14); 744static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
745 745
746/* Update Rate */ 746/* Update Interval */
747static const unsigned int update_rates[] = { 747static const unsigned int update_intervals[] = {
748 16000, 8000, 4000, 2000, 1000, 500, 250, 125, 748 16000, 8000, 4000, 2000, 1000, 500, 250, 125,
749}; 749};
750 750
751static ssize_t show_update_rate(struct device *dev, 751static ssize_t show_update_interval(struct device *dev,
752 struct device_attribute *attr, char *buf) 752 struct device_attribute *attr, char *buf)
753{ 753{
754 struct i2c_client *client = to_i2c_client(dev); 754 struct i2c_client *client = to_i2c_client(dev);
755 struct adm1031_data *data = i2c_get_clientdata(client); 755 struct adm1031_data *data = i2c_get_clientdata(client);
756 756
757 return sprintf(buf, "%u\n", data->update_rate); 757 return sprintf(buf, "%u\n", data->update_interval);
758} 758}
759 759
760static ssize_t set_update_rate(struct device *dev, 760static ssize_t set_update_interval(struct device *dev,
761 struct device_attribute *attr, 761 struct device_attribute *attr,
762 const char *buf, size_t count) 762 const char *buf, size_t count)
763{ 763{
764 struct i2c_client *client = to_i2c_client(dev); 764 struct i2c_client *client = to_i2c_client(dev);
765 struct adm1031_data *data = i2c_get_clientdata(client); 765 struct adm1031_data *data = i2c_get_clientdata(client);
@@ -771,12 +771,15 @@ static ssize_t set_update_rate(struct device *dev,
771 if (err) 771 if (err)
772 return err; 772 return err;
773 773
774 /* find the nearest update rate from the table */ 774 /*
775 for (i = 0; i < ARRAY_SIZE(update_rates) - 1; i++) { 775 * Find the nearest update interval from the table.
776 if (val >= update_rates[i]) 776 * Use it to determine the matching update rate.
777 */
778 for (i = 0; i < ARRAY_SIZE(update_intervals) - 1; i++) {
779 if (val >= update_intervals[i])
777 break; 780 break;
778 } 781 }
779 /* if not found, we point to the last entry (lowest update rate) */ 782 /* if not found, we point to the last entry (lowest update interval) */
780 783
781 /* set the new update rate while preserving other settings */ 784 /* set the new update rate while preserving other settings */
782 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 785 reg = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
@@ -785,14 +788,14 @@ static ssize_t set_update_rate(struct device *dev,
785 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg); 788 adm1031_write_value(client, ADM1031_REG_FAN_FILTER, reg);
786 789
787 mutex_lock(&data->update_lock); 790 mutex_lock(&data->update_lock);
788 data->update_rate = update_rates[i]; 791 data->update_interval = update_intervals[i];
789 mutex_unlock(&data->update_lock); 792 mutex_unlock(&data->update_lock);
790 793
791 return count; 794 return count;
792} 795}
793 796
794static DEVICE_ATTR(update_rate, S_IRUGO | S_IWUSR, show_update_rate, 797static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
795 set_update_rate); 798 set_update_interval);
796 799
797static struct attribute *adm1031_attributes[] = { 800static struct attribute *adm1031_attributes[] = {
798 &sensor_dev_attr_fan1_input.dev_attr.attr, 801 &sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -830,7 +833,7 @@ static struct attribute *adm1031_attributes[] = {
830 833
831 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr, 834 &sensor_dev_attr_auto_fan1_min_pwm.dev_attr.attr,
832 835
833 &dev_attr_update_rate.attr, 836 &dev_attr_update_interval.attr,
834 &dev_attr_alarms.attr, 837 &dev_attr_alarms.attr,
835 838
836 NULL 839 NULL
@@ -981,7 +984,8 @@ static void adm1031_init_client(struct i2c_client *client)
981 mask = ADM1031_UPDATE_RATE_MASK; 984 mask = ADM1031_UPDATE_RATE_MASK;
982 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER); 985 read_val = adm1031_read_value(client, ADM1031_REG_FAN_FILTER);
983 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT; 986 i = (read_val & mask) >> ADM1031_UPDATE_RATE_SHIFT;
984 data->update_rate = update_rates[i]; 987 /* Save it as update interval */
988 data->update_interval = update_intervals[i];
985} 989}
986 990
987static struct adm1031_data *adm1031_update_device(struct device *dev) 991static struct adm1031_data *adm1031_update_device(struct device *dev)
@@ -993,7 +997,8 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
993 997
994 mutex_lock(&data->update_lock); 998 mutex_lock(&data->update_lock);
995 999
996 next_update = data->last_updated + msecs_to_jiffies(data->update_rate); 1000 next_update = data->last_updated
1001 + msecs_to_jiffies(data->update_interval);
997 if (time_after(jiffies, next_update) || !data->valid) { 1002 if (time_after(jiffies, next_update) || !data->valid) {
998 1003
999 dev_dbg(&client->dev, "Starting adm1031 update\n"); 1004 dev_dbg(&client->dev, "Starting adm1031 update\n");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de8111114f46..a23b17a78ace 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <asm/msr.h> 37#include <asm/msr.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/smp.h>
39 40
40#define DRVNAME "coretemp" 41#define DRVNAME "coretemp"
41 42
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
423 int err; 424 int err;
424 struct platform_device *pdev; 425 struct platform_device *pdev;
425 struct pdev_entry *pdev_entry; 426 struct pdev_entry *pdev_entry;
426#ifdef CONFIG_SMP
427 struct cpuinfo_x86 *c = &cpu_data(cpu); 427 struct cpuinfo_x86 *c = &cpu_data(cpu);
428#endif 428
429 /*
430 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
431 * sensors. We check this bit only, all the early CPUs
432 * without thermal sensors will be filtered out.
433 */
434 if (!cpu_has(c, X86_FEATURE_DTS)) {
435 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
436 " has no thermal sensor.\n", c->x86_model);
437 return 0;
438 }
429 439
430 mutex_lock(&pdev_list_mutex); 440 mutex_lock(&pdev_list_mutex);
431 441
@@ -482,14 +492,22 @@ exit:
482 492
483static void coretemp_device_remove(unsigned int cpu) 493static void coretemp_device_remove(unsigned int cpu)
484{ 494{
485 struct pdev_entry *p, *n; 495 struct pdev_entry *p;
496 unsigned int i;
497
486 mutex_lock(&pdev_list_mutex); 498 mutex_lock(&pdev_list_mutex);
487 list_for_each_entry_safe(p, n, &pdev_list, list) { 499 list_for_each_entry(p, &pdev_list, list) {
488 if (p->cpu == cpu) { 500 if (p->cpu != cpu)
489 platform_device_unregister(p->pdev); 501 continue;
490 list_del(&p->list); 502
491 kfree(p); 503 platform_device_unregister(p->pdev);
492 } 504 list_del(&p->list);
505 mutex_unlock(&pdev_list_mutex);
506 kfree(p);
507 for_each_cpu(i, cpu_sibling_mask(cpu))
508 if (i != cpu && !coretemp_device_add(i))
509 break;
510 return;
493 } 511 }
494 mutex_unlock(&pdev_list_mutex); 512 mutex_unlock(&pdev_list_mutex);
495} 513}
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
527 if (err) 545 if (err)
528 goto exit; 546 goto exit;
529 547
530 for_each_online_cpu(i) { 548 for_each_online_cpu(i)
531 struct cpuinfo_x86 *c = &cpu_data(i); 549 coretemp_device_add(i);
532 /* 550
533 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 551#ifndef CONFIG_HOTPLUG_CPU
534 * sensors. We check this bit only, all the early CPUs
535 * without thermal sensors will be filtered out.
536 */
537 if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
538 coretemp_device_add(i);
539 else {
540 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
541 " has no thermal sensor.\n", c->x86_model);
542 }
543 }
544 if (list_empty(&pdev_list)) { 552 if (list_empty(&pdev_list)) {
545 err = -ENODEV; 553 err = -ENODEV;
546 goto exit_driver_unreg; 554 goto exit_driver_unreg;
547 } 555 }
556#endif
548 557
549 register_hotcpu_notifier(&coretemp_cpu_notifier); 558 register_hotcpu_notifier(&coretemp_cpu_notifier);
550 return 0; 559 return 0;
551 560
552exit_driver_unreg:
553#ifndef CONFIG_HOTPLUG_CPU 561#ifndef CONFIG_HOTPLUG_CPU
562exit_driver_unreg:
554 platform_driver_unregister(&coretemp_driver); 563 platform_driver_unregister(&coretemp_driver);
555#endif 564#endif
556exit: 565exit:
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5b58b20dead1..8dee3f38fdfb 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -308,7 +308,6 @@ static int emc1403_probe(struct i2c_client *client,
308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); 308 res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
309 if (res) { 309 if (res) {
310 dev_warn(&client->dev, "create group failed\n"); 310 dev_warn(&client->dev, "create group failed\n");
311 hwmon_device_unregister(data->hwmon_dev);
312 goto thermal_error1; 311 goto thermal_error1;
313 } 312 }
314 data->hwmon_dev = hwmon_device_register(&client->dev); 313 data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 537841ef44b9..75afb3b0e076 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev;
111/* Super-I/O Function prototypes */ 111/* Super-I/O Function prototypes */
112static inline int superio_inb(int base, int reg); 112static inline int superio_inb(int base, int reg);
113static inline int superio_inw(int base, int reg); 113static inline int superio_inw(int base, int reg);
114static inline void superio_enter(int base); 114static inline int superio_enter(int base);
115static inline void superio_select(int base, int ld); 115static inline void superio_select(int base, int ld);
116static inline void superio_exit(int base); 116static inline void superio_exit(int base);
117 117
@@ -861,11 +861,20 @@ static int superio_inw(int base, int reg)
861 return val; 861 return val;
862} 862}
863 863
864static inline void superio_enter(int base) 864static inline int superio_enter(int base)
865{ 865{
866 /* Don't step on other drivers' I/O space by accident */
867 if (!request_muxed_region(base, 2, DRVNAME)) {
868 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
869 base);
870 return -EBUSY;
871 }
872
866 /* according to the datasheet the key must be send twice! */ 873 /* according to the datasheet the key must be send twice! */
867 outb(SIO_UNLOCK_KEY, base); 874 outb(SIO_UNLOCK_KEY, base);
868 outb(SIO_UNLOCK_KEY, base); 875 outb(SIO_UNLOCK_KEY, base);
876
877 return 0;
869} 878}
870 879
871static inline void superio_select(int base, int ld) 880static inline void superio_select(int base, int ld)
@@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld)
877static inline void superio_exit(int base) 886static inline void superio_exit(int base)
878{ 887{
879 outb(SIO_LOCK_KEY, base); 888 outb(SIO_LOCK_KEY, base);
889 release_region(base, 2);
880} 890}
881 891
882static inline int fan_from_reg(u16 reg) 892static inline int fan_from_reg(u16 reg)
@@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev)
2175static int __init f71882fg_find(int sioaddr, unsigned short *address, 2185static int __init f71882fg_find(int sioaddr, unsigned short *address,
2176 struct f71882fg_sio_data *sio_data) 2186 struct f71882fg_sio_data *sio_data)
2177{ 2187{
2178 int err = -ENODEV;
2179 u16 devid; 2188 u16 devid;
2180 2189 int err = superio_enter(sioaddr);
2181 /* Don't step on other drivers' I/O space by accident */ 2190 if (err)
2182 if (!request_region(sioaddr, 2, DRVNAME)) { 2191 return err;
2183 printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n",
2184 (int)sioaddr);
2185 return -EBUSY;
2186 }
2187
2188 superio_enter(sioaddr);
2189 2192
2190 devid = superio_inw(sioaddr, SIO_REG_MANID); 2193 devid = superio_inw(sioaddr, SIO_REG_MANID);
2191 if (devid != SIO_FINTEK_ID) { 2194 if (devid != SIO_FINTEK_ID) {
2192 pr_debug(DRVNAME ": Not a Fintek device\n"); 2195 pr_debug(DRVNAME ": Not a Fintek device\n");
2196 err = -ENODEV;
2193 goto exit; 2197 goto exit;
2194 } 2198 }
2195 2199
@@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2213 default: 2217 default:
2214 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", 2218 printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n",
2215 (unsigned int)devid); 2219 (unsigned int)devid);
2220 err = -ENODEV;
2216 goto exit; 2221 goto exit;
2217 } 2222 }
2218 2223
@@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2223 2228
2224 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { 2229 if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) {
2225 printk(KERN_WARNING DRVNAME ": Device not activated\n"); 2230 printk(KERN_WARNING DRVNAME ": Device not activated\n");
2231 err = -ENODEV;
2226 goto exit; 2232 goto exit;
2227 } 2233 }
2228 2234
2229 *address = superio_inw(sioaddr, SIO_REG_ADDR); 2235 *address = superio_inw(sioaddr, SIO_REG_ADDR);
2230 if (*address == 0) { 2236 if (*address == 0) {
2231 printk(KERN_WARNING DRVNAME ": Base address not set\n"); 2237 printk(KERN_WARNING DRVNAME ": Base address not set\n");
2238 err = -ENODEV;
2232 goto exit; 2239 goto exit;
2233 } 2240 }
2234 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ 2241 *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */
@@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2239 (int)superio_inb(sioaddr, SIO_REG_DEVREV)); 2246 (int)superio_inb(sioaddr, SIO_REG_DEVREV));
2240exit: 2247exit:
2241 superio_exit(sioaddr); 2248 superio_exit(sioaddr);
2242 release_region(sioaddr, 2);
2243 return err; 2249 return err;
2244} 2250}
2245 2251
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 0f58ecc5334d..9638d58f99fd 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ enum chips { f75373, f75375 };
79#define F75375_REG_PWM2_DROP_DUTY 0x6C 79#define F75375_REG_PWM2_DROP_DUTY 0x6C
80 80
81#define FAN_CTRL_LINEAR(nr) (4 + nr) 81#define FAN_CTRL_LINEAR(nr) (4 + nr)
82#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2)) 82#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
83 83
84/* 84/*
85 * Data structures and manipulation thereof 85 * Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
298 return -EINVAL; 298 return -EINVAL;
299 299
300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); 300 fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
301 fanmode = ~(3 << FAN_CTRL_MODE(nr)); 301 fanmode &= ~(3 << FAN_CTRL_MODE(nr));
302 302
303 switch (val) { 303 switch (val) {
304 case 0: /* Full speed */ 304 case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
350 350
351 mutex_lock(&data->update_lock); 351 mutex_lock(&data->update_lock);
352 conf = f75375_read8(client, F75375_REG_CONFIG1); 352 conf = f75375_read8(client, F75375_REG_CONFIG1);
353 conf = ~(1 << FAN_CTRL_LINEAR(nr)); 353 conf &= ~(1 << FAN_CTRL_LINEAR(nr));
354 354
355 if (val == 0) 355 if (val == 0)
356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ; 356 conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index 7580f55e67e3..36e957532230 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), 221 AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), 222 AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), 223 AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
224 AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted),
225 AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd),
224 { NULL, } 226 { NULL, }
225/* Laptop models without axis info (yet): 227/* Laptop models without axis info (yet):
226 * "NC6910" "HP Compaq 6910" 228 * "NC6910" "HP Compaq 6910"
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f036b159..fc591ae53107 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
277 wake_up_interruptible(&lis3_dev.misc_wait); 277 wake_up_interruptible(&lis3_dev.misc_wait);
278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); 278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
279out: 279out:
280 if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && 280 if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
281 lis3_dev.idev->input->users) 281 lis3_dev.idev->input->users)
282 return IRQ_WAKE_THREAD; 282 return IRQ_WAKE_THREAD;
283 return IRQ_HANDLED; 283 return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
718 * io-apic is not configurable (and generates a warning) but I keep it 718 * io-apic is not configurable (and generates a warning) but I keep it
719 * in case of support for other hardware. 719 * in case of support for other hardware.
720 */ 720 */
721 if (dev->whoami == WAI_8B) 721 if (dev->pdata && dev->whoami == WAI_8B)
722 thread_fn = lis302dl_interrupt_thread1_8b; 722 thread_fn = lis302dl_interrupt_thread1_8b;
723 else 723 else
724 thread_fn = NULL; 724 thread_fn = NULL;
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index dc1f5402c1d7..8e5933b72d19 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -121,7 +121,7 @@ static int lis3lv02d_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
121{ 121{
122 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 122 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
123 123
124 if (!lis3->pdata->wakeup_flags) 124 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
125 lis3lv02d_poweroff(lis3); 125 lis3lv02d_poweroff(lis3);
126 return 0; 126 return 0;
127} 127}
@@ -130,7 +130,7 @@ static int lis3lv02d_i2c_resume(struct i2c_client *client)
130{ 130{
131 struct lis3lv02d *lis3 = i2c_get_clientdata(client); 131 struct lis3lv02d *lis3 = i2c_get_clientdata(client);
132 132
133 if (!lis3->pdata->wakeup_flags) 133 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
134 lis3lv02d_poweron(lis3); 134 lis3lv02d_poweron(lis3);
135 return 0; 135 return 0;
136} 136}
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 82b16808a274..b9be5e3a22b3 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -92,7 +92,7 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
92{ 92{
93 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 93 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
94 94
95 if (!lis3->pdata->wakeup_flags) 95 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
96 lis3lv02d_poweroff(&lis3_dev); 96 lis3lv02d_poweroff(&lis3_dev);
97 97
98 return 0; 98 return 0;
@@ -102,7 +102,7 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
102{ 102{
103 struct lis3lv02d *lis3 = spi_get_drvdata(spi); 103 struct lis3lv02d *lis3 = spi_get_drvdata(spi);
104 104
105 if (!lis3->pdata->wakeup_flags) 105 if (!lis3->pdata || !lis3->pdata->wakeup_flags)
106 lis3lv02d_poweron(lis3); 106 lis3lv02d_poweron(lis3);
107 107
108 return 0; 108 return 0;
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 94741d42112d..464340f25496 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -91,7 +91,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev);
91struct lm95241_data { 91struct lm95241_data {
92 struct device *hwmon_dev; 92 struct device *hwmon_dev;
93 struct mutex update_lock; 93 struct mutex update_lock;
94 unsigned long last_updated, rate; /* in jiffies */ 94 unsigned long last_updated, interval; /* in jiffies */
95 char valid; /* zero until following fields are valid */ 95 char valid; /* zero until following fields are valid */
96 /* registers values */ 96 /* registers values */
97 u8 local_h, local_l; /* local */ 97 u8 local_h, local_l; /* local */
@@ -114,23 +114,23 @@ show_temp(local);
114show_temp(remote1); 114show_temp(remote1);
115show_temp(remote2); 115show_temp(remote2);
116 116
117static ssize_t show_rate(struct device *dev, struct device_attribute *attr, 117static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
118 char *buf) 118 char *buf)
119{ 119{
120 struct lm95241_data *data = lm95241_update_device(dev); 120 struct lm95241_data *data = lm95241_update_device(dev);
121 121
122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->rate / HZ); 122 snprintf(buf, PAGE_SIZE - 1, "%lu\n", 1000 * data->interval / HZ);
123 return strlen(buf); 123 return strlen(buf);
124} 124}
125 125
126static ssize_t set_rate(struct device *dev, struct device_attribute *attr, 126static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
127 const char *buf, size_t count) 127 const char *buf, size_t count)
128{ 128{
129 struct i2c_client *client = to_i2c_client(dev); 129 struct i2c_client *client = to_i2c_client(dev);
130 struct lm95241_data *data = i2c_get_clientdata(client); 130 struct lm95241_data *data = i2c_get_clientdata(client);
131 131
132 strict_strtol(buf, 10, &data->rate); 132 strict_strtol(buf, 10, &data->interval);
133 data->rate = data->rate * HZ / 1000; 133 data->interval = data->interval * HZ / 1000;
134 134
135 return count; 135 return count;
136} 136}
@@ -286,7 +286,8 @@ static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min1, set_min1);
286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2); 286static DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min2, set_min2);
287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1); 287static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max1, set_max1);
288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2); 288static DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max2, set_max2);
289static DEVICE_ATTR(rate, S_IWUSR | S_IRUGO, show_rate, set_rate); 289static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
290 set_interval);
290 291
291static struct attribute *lm95241_attributes[] = { 292static struct attribute *lm95241_attributes[] = {
292 &dev_attr_temp1_input.attr, 293 &dev_attr_temp1_input.attr,
@@ -298,7 +299,7 @@ static struct attribute *lm95241_attributes[] = {
298 &dev_attr_temp3_min.attr, 299 &dev_attr_temp3_min.attr,
299 &dev_attr_temp2_max.attr, 300 &dev_attr_temp2_max.attr,
300 &dev_attr_temp3_max.attr, 301 &dev_attr_temp3_max.attr,
301 &dev_attr_rate.attr, 302 &dev_attr_update_interval.attr,
302 NULL 303 NULL
303}; 304};
304 305
@@ -376,7 +377,7 @@ static void lm95241_init_client(struct i2c_client *client)
376{ 377{
377 struct lm95241_data *data = i2c_get_clientdata(client); 378 struct lm95241_data *data = i2c_get_clientdata(client);
378 379
379 data->rate = HZ; /* 1 sec default */ 380 data->interval = HZ; /* 1 sec default */
380 data->valid = 0; 381 data->valid = 0;
381 data->config = CFG_CR0076; 382 data->config = CFG_CR0076;
382 data->model = 0; 383 data->model = 0;
@@ -410,7 +411,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
410 411
411 mutex_lock(&data->update_lock); 412 mutex_lock(&data->update_lock);
412 413
413 if (time_after(jiffies, data->last_updated + data->rate) || 414 if (time_after(jiffies, data->last_updated + data->interval) ||
414 !data->valid) { 415 !data->valid) {
415 dev_dbg(&client->dev, "Updating lm95241 data.\n"); 416 dev_dbg(&client->dev, "Updating lm95241 data.\n");
416 data->local_h = 417 data->local_h =
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fcda6ed..f11903936c8b 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/pci.h>
37#include <asm/msr.h> 36#include <asm/msr.h>
38#include <asm/processor.h> 37#include <asm/processor.h>
39 38
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
224 223
225 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); 224 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
226 if (err) 225 if (err)
227 goto exit_free; 226 goto exit_dev;
228 227
229 data->hwmon_dev = hwmon_device_register(&pdev->dev); 228 data->hwmon_dev = hwmon_device_register(&pdev->dev);
230 if (IS_ERR(data->hwmon_dev)) { 229 if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
238 237
239exit_class: 238exit_class:
240 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 239 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
240exit_dev:
241 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
241exit_free: 242exit_free:
242 kfree(data); 243 kfree(data);
243exit: 244exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
250 251
251 hwmon_device_unregister(data->hwmon_dev); 252 hwmon_device_unregister(data->hwmon_dev);
252 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 253 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
254 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
253 platform_set_drvdata(pdev, NULL); 255 platform_set_drvdata(pdev, NULL);
254 kfree(data); 256 kfree(data);
255 return 0; 257 return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
281 int err; 283 int err;
282 struct platform_device *pdev; 284 struct platform_device *pdev;
283 struct pdev_entry *pdev_entry; 285 struct pdev_entry *pdev_entry;
284#ifdef CONFIG_SMP
285 struct cpuinfo_x86 *c = &cpu_data(cpu); 286 struct cpuinfo_x86 *c = &cpu_data(cpu);
286#endif 287
288 if (!cpu_has(c, X86_FEATURE_PTS))
289 return 0;
287 290
288 mutex_lock(&pdev_list_mutex); 291 mutex_lock(&pdev_list_mutex);
289 292
@@ -339,17 +342,18 @@ exit:
339#ifdef CONFIG_HOTPLUG_CPU 342#ifdef CONFIG_HOTPLUG_CPU
340static void pkgtemp_device_remove(unsigned int cpu) 343static void pkgtemp_device_remove(unsigned int cpu)
341{ 344{
342 struct pdev_entry *p, *n; 345 struct pdev_entry *p;
343 unsigned int i; 346 unsigned int i;
344 int err; 347 int err;
345 348
346 mutex_lock(&pdev_list_mutex); 349 mutex_lock(&pdev_list_mutex);
347 list_for_each_entry_safe(p, n, &pdev_list, list) { 350 list_for_each_entry(p, &pdev_list, list) {
348 if (p->cpu != cpu) 351 if (p->cpu != cpu)
349 continue; 352 continue;
350 353
351 platform_device_unregister(p->pdev); 354 platform_device_unregister(p->pdev);
352 list_del(&p->list); 355 list_del(&p->list);
356 mutex_unlock(&pdev_list_mutex);
353 kfree(p); 357 kfree(p);
354 for_each_cpu(i, cpu_core_mask(cpu)) { 358 for_each_cpu(i, cpu_core_mask(cpu)) {
355 if (i != cpu) { 359 if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
358 break; 362 break;
359 } 363 }
360 } 364 }
361 break; 365 return;
362 } 366 }
363 mutex_unlock(&pdev_list_mutex); 367 mutex_unlock(&pdev_list_mutex);
364} 368}
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
399 goto exit; 403 goto exit;
400 404
401 for_each_online_cpu(i) { 405 for_each_online_cpu(i) {
402 struct cpuinfo_x86 *c = &cpu_data(i);
403
404 if (!cpu_has(c, X86_FEATURE_PTS))
405 continue;
406
407 err = pkgtemp_device_add(i); 406 err = pkgtemp_device_add(i);
408 if (err) 407 if (err)
409 goto exit_devices_unreg; 408 goto exit_devices_unreg;
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index e96e69dd36fb..072c58008a63 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -127,6 +127,7 @@ superio_enter(int ioreg)
127static inline void 127static inline void
128superio_exit(int ioreg) 128superio_exit(int ioreg)
129{ 129{
130 outb(0xaa, ioreg);
130 outb(0x02, ioreg); 131 outb(0x02, ioreg);
131 outb(0x02, ioreg + 1); 132 outb(0x02, ioreg + 1);
132} 133}
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 2222c87876b9..b8feac5f2ef4 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -357,9 +357,6 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
357 357
358 dev->terminate = 0; 358 dev->terminate = 0;
359 359
360 /* write the data into mode register */
361 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
362
363 /* 360 /*
364 * First byte should be set here, not after interrupt, 361 * First byte should be set here, not after interrupt,
365 * because transmit-data-ready interrupt can come before 362 * because transmit-data-ready interrupt can come before
@@ -371,6 +368,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
371 dev->buf_len--; 368 dev->buf_len--;
372 } 369 }
373 370
371 /* write the data into mode register; start transmitting */
372 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag);
373
374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 374 r = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
375 dev->adapter.timeout); 375 dev->adapter.timeout);
376 if (r == 0) { 376 if (r == 0) {
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 0e9f85d0a835..56dbe54e8811 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c)
218 return result; 218 return result;
219 } else if (result == 0) { 219 } else if (result == 0) {
220 dev_dbg(i2c->dev, "%s: timeout\n", __func__); 220 dev_dbg(i2c->dev, "%s: timeout\n", __func__);
221 result = -ETIMEDOUT; 221 return -ETIMEDOUT;
222 } 222 }
223 223
224 return 0; 224 return 0;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 7674efb55378..b33c78586bfc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
680 680
681 if (r == 0) 681 if (r == 0)
682 r = num; 682 r = num;
683
684 omap_i2c_wait_for_bb(dev);
683out: 685out:
684 omap_i2c_idle(dev); 686 omap_i2c_idle(dev);
685 return r; 687 return r;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 72902e0bbfa7..bf831bf81587 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
662 unsigned long sda_delay; 662 unsigned long sda_delay;
663 663
664 if (pdata->sda_delay) { 664 if (pdata->sda_delay) {
665 sda_delay = (freq / 1000) * pdata->sda_delay; 665 sda_delay = clkin * pdata->sda_delay;
666 sda_delay /= 1000000; 666 sda_delay = DIV_ROUND_UP(sda_delay, 1000000);
667 sda_delay = DIV_ROUND_UP(sda_delay, 5); 667 sda_delay = DIV_ROUND_UP(sda_delay, 5);
668 if (sda_delay > 3) 668 if (sda_delay > 3)
669 sda_delay = 3; 669 sda_delay = 3;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4c3d1bfec0c5..068cef0a987a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1448 if (hwif == NULL) 1448 if (hwif == NULL)
1449 continue; 1449 continue;
1450 1450
1451 if (hwif->present)
1452 hwif_register_devices(hwif);
1453 }
1454
1455 ide_host_for_each_port(i, hwif, host) {
1456 if (hwif == NULL)
1457 continue;
1458
1459 ide_sysfs_register_port(hwif); 1451 ide_sysfs_register_port(hwif);
1460 ide_proc_register_port(hwif); 1452 ide_proc_register_port(hwif);
1461 1453
1462 if (hwif->present) 1454 if (hwif->present) {
1463 ide_proc_port_register_devices(hwif); 1455 ide_proc_port_register_devices(hwif);
1456 hwif_register_devices(hwif);
1457 }
1464 } 1458 }
1465 1459
1466 return j ? 0 : -1; 1460 return j ? 0 : -1;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index a10152bb1427..0906fc5b69b9 100755..100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -83,7 +83,7 @@ static unsigned int mwait_substates;
83/* Reliable LAPIC Timer States, bit 1 for C1 etc. */ 83/* Reliable LAPIC Timer States, bit 1 for C1 etc. */
84static unsigned int lapic_timer_reliable_states; 84static unsigned int lapic_timer_reliable_states;
85 85
86static struct cpuidle_device *intel_idle_cpuidle_devices; 86static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
87static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); 87static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
88 88
89static struct cpuidle_state *cpuidle_state_table; 89static struct cpuidle_state *cpuidle_state_table;
@@ -108,7 +108,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
108 .name = "NHM-C3", 108 .name = "NHM-C3",
109 .desc = "MWAIT 0x10", 109 .desc = "MWAIT 0x10",
110 .driver_data = (void *) 0x10, 110 .driver_data = (void *) 0x10,
111 .flags = CPUIDLE_FLAG_TIME_VALID, 111 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
112 .exit_latency = 20, 112 .exit_latency = 20,
113 .power_usage = 500, 113 .power_usage = 500,
114 .target_residency = 80, 114 .target_residency = 80,
@@ -117,7 +117,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
117 .name = "NHM-C6", 117 .name = "NHM-C6",
118 .desc = "MWAIT 0x20", 118 .desc = "MWAIT 0x20",
119 .driver_data = (void *) 0x20, 119 .driver_data = (void *) 0x20,
120 .flags = CPUIDLE_FLAG_TIME_VALID, 120 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
121 .exit_latency = 200, 121 .exit_latency = 200,
122 .power_usage = 350, 122 .power_usage = 350,
123 .target_residency = 800, 123 .target_residency = 800,
@@ -149,7 +149,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
149 .name = "ATM-C4", 149 .name = "ATM-C4",
150 .desc = "MWAIT 0x30", 150 .desc = "MWAIT 0x30",
151 .driver_data = (void *) 0x30, 151 .driver_data = (void *) 0x30,
152 .flags = CPUIDLE_FLAG_TIME_VALID, 152 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
153 .exit_latency = 100, 153 .exit_latency = 100,
154 .power_usage = 250, 154 .power_usage = 250,
155 .target_residency = 400, 155 .target_residency = 400,
@@ -159,7 +159,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
159 .name = "ATM-C6", 159 .name = "ATM-C6",
160 .desc = "MWAIT 0x40", 160 .desc = "MWAIT 0x40",
161 .driver_data = (void *) 0x40, 161 .driver_data = (void *) 0x40,
162 .flags = CPUIDLE_FLAG_TIME_VALID, 162 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
163 .exit_latency = 200, 163 .exit_latency = 200,
164 .power_usage = 150, 164 .power_usage = 150,
165 .target_residency = 800, 165 .target_residency = 800,
@@ -185,6 +185,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
185 185
186 local_irq_disable(); 186 local_irq_disable();
187 187
188 /*
189 * If the state flag indicates that the TLB will be flushed or if this
190 * is the deepest c-state supported, do a voluntary leave mm to avoid
191 * costly and mostly unnecessary wakeups for flushing the user TLB's
192 * associated with the active mm.
193 */
194 if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED ||
195 (&dev->states[dev->state_count - 1] == state))
196 leave_mm(cpu);
197
188 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 198 if (!(lapic_timer_reliable_states & (1 << (cstate))))
189 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 199 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
190 200
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8f0caf7d4482..78fbe9ffe7f0 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
53#define T3_MAX_PBL_SIZE 256 53#define T3_MAX_PBL_SIZE 256
54#define T3_MAX_RQ_SIZE 1024 54#define T3_MAX_RQ_SIZE 1024
55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) 55#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
56#define T3_MAX_CQ_DEPTH 262144 56#define T3_MAX_CQ_DEPTH 65536
57#define T3_MAX_NUM_STAG (1<<15) 57#define T3_MAX_NUM_STAG (1<<15)
58#define T3_MAX_MR_SIZE 0x100000000ULL 58#define T3_MAX_MR_SIZE 0x100000000ULL
59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ 59#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a21994..13c88871dc3b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
463 V_MSS_IDX(mtu_idx) | 463 V_MSS_IDX(mtu_idx) |
464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
466 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 466 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
467 V_CONG_CONTROL_FLAVOR(cong_flavor);
467 skb->priority = CPL_PRIORITY_SETUP; 468 skb->priority = CPL_PRIORITY_SETUP;
468 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 set_arp_failure_handler(skb, act_open_req_arp_failure);
469 470
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1280 V_MSS_IDX(mtu_idx) | 1281 V_MSS_IDX(mtu_idx) |
1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1282 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1283 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1283 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1284 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1285 V_CONG_CONTROL_FLAVOR(cong_flavor);
1284 1286
1285 rpl = cplhdr(skb); 1287 rpl = cplhdr(skb);
1286 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1288 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index 4175a4bd0c78..bd995b2b50d8 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,5 +1,6 @@
1config MLX4_INFINIBAND 1config MLX4_INFINIBAND
2 tristate "Mellanox ConnectX HCA support" 2 tristate "Mellanox ConnectX HCA support"
3 depends on NETDEVICES && NETDEV_10000 && PCI
3 select MLX4_CORE 4 select MLX4_CORE
4 ---help--- 5 ---help---
5 This driver provides low-level InfiniBand support for 6 This driver provides low-level InfiniBand support for
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 443cea55daac..6220d9d75b58 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -502,7 +502,9 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
502static void nes_retrans_expired(struct nes_cm_node *cm_node) 502static void nes_retrans_expired(struct nes_cm_node *cm_node)
503{ 503{
504 struct iw_cm_id *cm_id = cm_node->cm_id; 504 struct iw_cm_id *cm_id = cm_node->cm_id;
505 switch (cm_node->state) { 505 enum nes_cm_node_state state = cm_node->state;
506 cm_node->state = NES_CM_STATE_CLOSED;
507 switch (state) {
506 case NES_CM_STATE_SYN_RCVD: 508 case NES_CM_STATE_SYN_RCVD:
507 case NES_CM_STATE_CLOSING: 509 case NES_CM_STATE_CLOSING:
508 rem_ref_cm_node(cm_node->cm_core, cm_node); 510 rem_ref_cm_node(cm_node->cm_core, cm_node);
@@ -511,7 +513,6 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
511 case NES_CM_STATE_FIN_WAIT1: 513 case NES_CM_STATE_FIN_WAIT1:
512 if (cm_node->cm_id) 514 if (cm_node->cm_id)
513 cm_id->rem_ref(cm_id); 515 cm_id->rem_ref(cm_id);
514 cm_node->state = NES_CM_STATE_CLOSED;
515 send_reset(cm_node, NULL); 516 send_reset(cm_node, NULL);
516 break; 517 break;
517 default: 518 default:
@@ -1439,9 +1440,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1439 break; 1440 break;
1440 case NES_CM_STATE_MPAREQ_RCVD: 1441 case NES_CM_STATE_MPAREQ_RCVD:
1441 passive_state = atomic_add_return(1, &cm_node->passive_state); 1442 passive_state = atomic_add_return(1, &cm_node->passive_state);
1442 if (passive_state == NES_SEND_RESET_EVENT)
1443 create_event(cm_node, NES_CM_EVENT_RESET);
1444 cm_node->state = NES_CM_STATE_CLOSED;
1445 dev_kfree_skb_any(skb); 1443 dev_kfree_skb_any(skb);
1446 break; 1444 break;
1447 case NES_CM_STATE_ESTABLISHED: 1445 case NES_CM_STATE_ESTABLISHED:
@@ -1456,6 +1454,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1456 case NES_CM_STATE_CLOSED: 1454 case NES_CM_STATE_CLOSED:
1457 drop_packet(skb); 1455 drop_packet(skb);
1458 break; 1456 break;
1457 case NES_CM_STATE_FIN_WAIT2:
1459 case NES_CM_STATE_FIN_WAIT1: 1458 case NES_CM_STATE_FIN_WAIT1:
1460 case NES_CM_STATE_LAST_ACK: 1459 case NES_CM_STATE_LAST_ACK:
1461 cm_node->cm_id->rem_ref(cm_node->cm_id); 1460 cm_node->cm_id->rem_ref(cm_node->cm_id);
@@ -2702,7 +2701,7 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
2702 nesibdev = nesvnic->nesibdev; 2701 nesibdev = nesvnic->nesibdev;
2703 2702
2704 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", 2703 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
2705 atomic_read(&nesvnic->netdev->refcnt)); 2704 netdev_refcnt_read(nesvnic->netdev));
2706 2705
2707 if (nesqp->active_conn) { 2706 if (nesqp->active_conn) {
2708 2707
@@ -2777,6 +2776,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2777 return -EINVAL; 2776 return -EINVAL;
2778 } 2777 }
2779 2778
2779 passive_state = atomic_add_return(1, &cm_node->passive_state);
2780 if (passive_state == NES_SEND_RESET_EVENT) {
2781 rem_ref_cm_node(cm_node->cm_core, cm_node);
2782 return -ECONNRESET;
2783 }
2784
2780 /* associate the node with the QP */ 2785 /* associate the node with the QP */
2781 nesqp->cm_node = (void *)cm_node; 2786 nesqp->cm_node = (void *)cm_node;
2782 cm_node->nesqp = nesqp; 2787 cm_node->nesqp = nesqp;
@@ -2786,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2786 atomic_inc(&cm_accepts); 2791 atomic_inc(&cm_accepts);
2787 2792
2788 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", 2793 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
2789 atomic_read(&nesvnic->netdev->refcnt)); 2794 netdev_refcnt_read(nesvnic->netdev));
2790 2795
2791 /* allocate the ietf frame and space for private data */ 2796 /* allocate the ietf frame and space for private data */
2792 nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, 2797 nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
@@ -2979,9 +2984,6 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2979 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, " 2984 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2980 "ret=%d\n", __func__, __LINE__, ret); 2985 "ret=%d\n", __func__, __LINE__, ret);
2981 2986
2982 passive_state = atomic_add_return(1, &cm_node->passive_state);
2983 if (passive_state == NES_SEND_RESET_EVENT)
2984 create_event(cm_node, NES_CM_EVENT_RESET);
2985 return 0; 2987 return 0;
2986} 2988}
2987 2989
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index f8233c851c69..1980a461c499 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3468,6 +3468,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3468 return; /* Ignore it, wait for close complete */ 3468 return; /* Ignore it, wait for close complete */
3469 3469
3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3470 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
3471 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) &&
3472 (nesqp->ibqp_state == IB_QPS_RTS) &&
3473 ((nesadapter->eeprom_version >> 16) != NES_A0)) {
3474 spin_lock_irqsave(&nesqp->lock, flags);
3475 nesqp->hw_iwarp_state = iwarp_state;
3476 nesqp->hw_tcp_state = tcp_state;
3477 nesqp->last_aeq = async_event_id;
3478 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3479 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3480 spin_unlock_irqrestore(&nesqp->lock, flags);
3481 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3482 nes_cm_disconn(nesqp);
3483 }
3471 nesqp->cm_id->add_ref(nesqp->cm_id); 3484 nesqp->cm_id->add_ref(nesqp->cm_id);
3472 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3485 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
3473 NES_TIMER_TYPE_CLOSE, 1, 0); 3486 NES_TIMER_TYPE_CLOSE, 1, 0);
@@ -3477,7 +3490,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3477 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3490 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3478 async_event_id, nesqp->last_aeq, tcp_state); 3491 async_event_id, nesqp->last_aeq, tcp_state);
3479 } 3492 }
3480
3481 break; 3493 break;
3482 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3494 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3483 if (nesqp->term_flags) { 3495 if (nesqp->term_flags) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index aa9183db32b1..1204c3432b63 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -45,6 +45,7 @@
45#define NES_PHY_TYPE_KR 9 45#define NES_PHY_TYPE_KR 9
46 46
47#define NES_MULTICAST_PF_MAX 8 47#define NES_MULTICAST_PF_MAX 8
48#define NES_A0 3
48 49
49enum pci_regs { 50enum pci_regs {
50 NES_INT_STAT = 0x0000, 51 NES_INT_STAT = 0x0000,
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 6dfdd49cdbcf..10560c796fd6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1446,14 +1446,14 @@ static int nes_netdev_set_pauseparam(struct net_device *netdev,
1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1446 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1447 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1448 nes_write_indexed(nesdev, 1448 nes_write_indexed(nesdev,
1449 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1449 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1450 nesdev->disable_tx_flow_control = 0; 1450 nesdev->disable_tx_flow_control = 0;
1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) { 1451 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
1452 u32temp = nes_read_indexed(nesdev, 1452 u32temp = nes_read_indexed(nesdev,
1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200)); 1453 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE; 1454 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1455 nes_write_indexed(nesdev, 1455 nes_write_indexed(nesdev,
1456 NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE + (nesdev->mac_index*0x200), u32temp); 1456 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1457 nesdev->disable_tx_flow_control = 1; 1457 nesdev->disable_tx_flow_control = 1;
1458 } 1458 }
1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) { 1459 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 9046e6675686..546fc22405fe 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -785,7 +785,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
785 785
786 nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n", 786 nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
787 nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context, 787 nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
788 atomic_read(&nesvnic->netdev->refcnt)); 788 netdev_refcnt_read(nesvnic->netdev));
789 789
790 err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds, 790 err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
791 nesadapter->max_pd, &pd_num, &nesadapter->next_pd); 791 nesadapter->max_pd, &pd_num, &nesadapter->next_pd);
@@ -1416,7 +1416,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1416 /* update the QP table */ 1416 /* update the QP table */
1417 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp; 1417 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
1418 nes_debug(NES_DBG_QP, "netdev refcnt=%u\n", 1418 nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
1419 atomic_read(&nesvnic->netdev->refcnt)); 1419 netdev_refcnt_read(nesvnic->netdev));
1420 1420
1421 return &nesqp->ibqp; 1421 return &nesqp->ibqp;
1422} 1422}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a9b025f4147a..ab6982056518 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1599,11 +1599,14 @@ EXPORT_SYMBOL(input_free_device);
1599 * @dev: input device supporting MT events and finger tracking 1599 * @dev: input device supporting MT events and finger tracking
1600 * @num_slots: number of slots used by the device 1600 * @num_slots: number of slots used by the device
1601 * 1601 *
1602 * This function allocates all necessary memory for MT slot handling 1602 * This function allocates all necessary memory for MT slot handling in the
1603 * in the input device, and adds ABS_MT_SLOT to the device capabilities. 1603 * input device, and adds ABS_MT_SLOT to the device capabilities. All slots
1604 * are initially marked as unused iby setting ABS_MT_TRACKING_ID to -1.
1604 */ 1605 */
1605int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots) 1606int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1606{ 1607{
1608 int i;
1609
1607 if (!num_slots) 1610 if (!num_slots)
1608 return 0; 1611 return 0;
1609 1612
@@ -1614,6 +1617,10 @@ int input_mt_create_slots(struct input_dev *dev, unsigned int num_slots)
1614 dev->mtsize = num_slots; 1617 dev->mtsize = num_slots;
1615 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); 1618 input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0);
1616 1619
1620 /* Mark slots as 'unused' */
1621 for (i = 0; i < num_slots; i++)
1622 dev->mt[i].abs[ABS_MT_TRACKING_ID - ABS_MT_FIRST] = -1;
1623
1617 return 0; 1624 return 0;
1618} 1625}
1619EXPORT_SYMBOL(input_mt_create_slots); 1626EXPORT_SYMBOL(input_mt_create_slots);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index ea67c49146a3..b95231763911 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -337,10 +337,14 @@ static void report_finger_data(struct input_dev *input,
337 const struct bcm5974_config *cfg, 337 const struct bcm5974_config *cfg,
338 const struct tp_finger *f) 338 const struct tp_finger *f)
339{ 339{
340 input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major)); 340 input_report_abs(input, ABS_MT_TOUCH_MAJOR,
341 input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor)); 341 raw2int(f->force_major) << 1);
342 input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major)); 342 input_report_abs(input, ABS_MT_TOUCH_MINOR,
343 input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor)); 343 raw2int(f->force_minor) << 1);
344 input_report_abs(input, ABS_MT_WIDTH_MAJOR,
345 raw2int(f->size_major) << 1);
346 input_report_abs(input, ABS_MT_WIDTH_MINOR,
347 raw2int(f->size_minor) << 1);
344 input_report_abs(input, ABS_MT_ORIENTATION, 348 input_report_abs(input, ABS_MT_ORIENTATION,
345 MAX_FINGER_ORIENTATION - raw2int(f->orientation)); 349 MAX_FINGER_ORIENTATION - raw2int(f->orientation));
346 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); 350 input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 46e4ba0b9246..f58513160480 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1485,8 +1485,8 @@ static int __init i8042_init(void)
1485 1485
1486static void __exit i8042_exit(void) 1486static void __exit i8042_exit(void)
1487{ 1487{
1488 platform_driver_unregister(&i8042_driver);
1489 platform_device_unregister(i8042_platform_device); 1488 platform_device_unregister(i8042_platform_device);
1489 platform_driver_unregister(&i8042_driver);
1490 i8042_platform_exit(); 1490 i8042_platform_exit();
1491 1491
1492 panic_blink = NULL; 1492 panic_blink = NULL;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 40d77ba8fdc1..6e29badb969e 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -243,10 +243,10 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
243 if (features->type == WACOM_G4 || 243 if (features->type == WACOM_G4 ||
244 features->type == WACOM_MO) { 244 features->type == WACOM_MO) {
245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f); 245 input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
246 rw = (signed)(data[7] & 0x04) - (data[7] & 0x03); 246 rw = (data[7] & 0x04) - (data[7] & 0x03);
247 } else { 247 } else {
248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f); 248 input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
249 rw = -(signed)data[6]; 249 rw = -(signed char)data[6];
250 } 250 }
251 input_report_rel(input, REL_WHEEL, rw); 251 input_report_rel(input, REL_WHEEL, rw);
252 } 252 }
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index b054494df846..3acf94cc5acd 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -98,6 +98,16 @@ static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
98 return capi_controller[contr - 1]; 98 return capi_controller[contr - 1];
99} 99}
100 100
101static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
102{
103 lockdep_assert_held(&capi_controller_lock);
104
105 if (applid - 1 >= CAPI_MAXAPPL)
106 return NULL;
107
108 return capi_applications[applid - 1];
109}
110
101static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) 111static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
102{ 112{
103 if (applid - 1 >= CAPI_MAXAPPL) 113 if (applid - 1 >= CAPI_MAXAPPL)
@@ -185,10 +195,9 @@ static void notify_up(u32 contr)
185 ctr->state = CAPI_CTR_RUNNING; 195 ctr->state = CAPI_CTR_RUNNING;
186 196
187 for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { 197 for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
188 ap = get_capi_appl_by_nr(applid); 198 ap = __get_capi_appl_by_nr(applid);
189 if (!ap) 199 if (ap)
190 continue; 200 register_appl(ctr, applid, &ap->rparam);
191 register_appl(ctr, applid, &ap->rparam);
192 } 201 }
193 202
194 wake_up_interruptible_all(&ctr->state_wait_queue); 203 wake_up_interruptible_all(&ctr->state_wait_queue);
@@ -215,7 +224,7 @@ static void ctr_down(struct capi_ctr *ctr, int new_state)
215 memset(ctr->serial, 0, sizeof(ctr->serial)); 224 memset(ctr->serial, 0, sizeof(ctr->serial));
216 225
217 for (applid = 1; applid <= CAPI_MAXAPPL; applid++) { 226 for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
218 ap = get_capi_appl_by_nr(applid); 227 ap = __get_capi_appl_by_nr(applid);
219 if (ap) 228 if (ap)
220 capi_ctr_put(ctr); 229 capi_ctr_put(ctr);
221 } 230 }
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 707d9c94cf9e..178942a2ee61 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -109,6 +109,9 @@ struct bas_cardstate {
109 109
110 struct urb *urb_int_in; /* URB for interrupt pipe */ 110 struct urb *urb_int_in; /* URB for interrupt pipe */
111 unsigned char *int_in_buf; 111 unsigned char *int_in_buf;
112 struct work_struct int_in_wq; /* for usb_clear_halt() */
113 struct timer_list timer_int_in; /* int read retry delay */
114 int retry_int_in;
112 115
113 spinlock_t lock; /* locks all following */ 116 spinlock_t lock; /* locks all following */
114 int basstate; /* bitmap (BS_*) */ 117 int basstate; /* bitmap (BS_*) */
@@ -169,7 +172,7 @@ static char *get_usb_rcmsg(int rc)
169 case -EAGAIN: 172 case -EAGAIN:
170 return "start frame too early or too much scheduled"; 173 return "start frame too early or too much scheduled";
171 case -EFBIG: 174 case -EFBIG:
172 return "too many isochronous frames requested"; 175 return "too many isoc frames requested";
173 case -EPIPE: 176 case -EPIPE:
174 return "endpoint stalled"; 177 return "endpoint stalled";
175 case -EMSGSIZE: 178 case -EMSGSIZE:
@@ -200,13 +203,13 @@ static char *get_usb_statmsg(int status)
200 case -ENOENT: 203 case -ENOENT:
201 return "unlinked (sync)"; 204 return "unlinked (sync)";
202 case -EINPROGRESS: 205 case -EINPROGRESS:
203 return "pending"; 206 return "URB still pending";
204 case -EPROTO: 207 case -EPROTO:
205 return "bit stuffing error, timeout, or unknown USB error"; 208 return "bitstuff error, timeout, or unknown USB error";
206 case -EILSEQ: 209 case -EILSEQ:
207 return "CRC mismatch, timeout, or unknown USB error"; 210 return "CRC mismatch, timeout, or unknown USB error";
208 case -ETIME: 211 case -ETIME:
209 return "timed out"; 212 return "USB response timeout";
210 case -EPIPE: 213 case -EPIPE:
211 return "endpoint stalled"; 214 return "endpoint stalled";
212 case -ECOMM: 215 case -ECOMM:
@@ -214,15 +217,15 @@ static char *get_usb_statmsg(int status)
214 case -ENOSR: 217 case -ENOSR:
215 return "OUT buffer underrun"; 218 return "OUT buffer underrun";
216 case -EOVERFLOW: 219 case -EOVERFLOW:
217 return "too much data"; 220 return "endpoint babble";
218 case -EREMOTEIO: 221 case -EREMOTEIO:
219 return "short packet detected"; 222 return "short packet";
220 case -ENODEV: 223 case -ENODEV:
221 return "device removed"; 224 return "device removed";
222 case -EXDEV: 225 case -EXDEV:
223 return "partial isochronous transfer"; 226 return "partial isoc transfer";
224 case -EINVAL: 227 case -EINVAL:
225 return "invalid argument"; 228 return "ISO madness";
226 case -ECONNRESET: 229 case -ECONNRESET:
227 return "unlinked (async)"; 230 return "unlinked (async)";
228 case -ESHUTDOWN: 231 case -ESHUTDOWN:
@@ -350,7 +353,7 @@ static inline void error_hangup(struct bc_state *bcs)
350 * reset Gigaset device because of an unrecoverable error 353 * reset Gigaset device because of an unrecoverable error
351 * This function may be called from any context, and takes care of 354 * This function may be called from any context, and takes care of
352 * scheduling the necessary actions for execution outside of interrupt context. 355 * scheduling the necessary actions for execution outside of interrupt context.
353 * cs->lock must not be held. 356 * cs->hw.bas->lock must not be held.
354 * argument: 357 * argument:
355 * controller state structure 358 * controller state structure
356 */ 359 */
@@ -358,7 +361,9 @@ static inline void error_reset(struct cardstate *cs)
358{ 361{
359 /* reset interrupt pipe to recover (ignore errors) */ 362 /* reset interrupt pipe to recover (ignore errors) */
360 update_basstate(cs->hw.bas, BS_RESETTING, 0); 363 update_basstate(cs->hw.bas, BS_RESETTING, 0);
361 req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT); 364 if (req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT))
365 /* submission failed, escalate to USB port reset */
366 usb_queue_reset_device(cs->hw.bas->interface);
362} 367}
363 368
364/* check_pending 369/* check_pending
@@ -438,23 +443,27 @@ static void cmd_in_timeout(unsigned long data)
438 return; 443 return;
439 } 444 }
440 445
441 if (ucs->retry_cmd_in++ < BAS_RETRY) { 446 if (ucs->retry_cmd_in++ >= BAS_RETRY) {
442 dev_notice(cs->dev, "control read: timeout, retry %d\n",
443 ucs->retry_cmd_in);
444 rc = atread_submit(cs, BAS_TIMEOUT);
445 if (rc >= 0 || rc == -ENODEV)
446 /* resubmitted or disconnected */
447 /* - bypass regular exit block */
448 return;
449 } else {
450 dev_err(cs->dev, 447 dev_err(cs->dev,
451 "control read: timeout, giving up after %d tries\n", 448 "control read: timeout, giving up after %d tries\n",
452 ucs->retry_cmd_in); 449 ucs->retry_cmd_in);
450 kfree(ucs->rcvbuf);
451 ucs->rcvbuf = NULL;
452 ucs->rcvbuf_size = 0;
453 error_reset(cs);
454 return;
455 }
456
457 gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d",
458 __func__, ucs->retry_cmd_in);
459 rc = atread_submit(cs, BAS_TIMEOUT);
460 if (rc < 0) {
461 kfree(ucs->rcvbuf);
462 ucs->rcvbuf = NULL;
463 ucs->rcvbuf_size = 0;
464 if (rc != -ENODEV)
465 error_reset(cs);
453 } 466 }
454 kfree(ucs->rcvbuf);
455 ucs->rcvbuf = NULL;
456 ucs->rcvbuf_size = 0;
457 error_reset(cs);
458} 467}
459 468
460/* read_ctrl_callback 469/* read_ctrl_callback
@@ -470,18 +479,11 @@ static void read_ctrl_callback(struct urb *urb)
470 struct cardstate *cs = inbuf->cs; 479 struct cardstate *cs = inbuf->cs;
471 struct bas_cardstate *ucs = cs->hw.bas; 480 struct bas_cardstate *ucs = cs->hw.bas;
472 int status = urb->status; 481 int status = urb->status;
473 int have_data = 0;
474 unsigned numbytes; 482 unsigned numbytes;
475 int rc; 483 int rc;
476 484
477 update_basstate(ucs, 0, BS_ATRDPEND); 485 update_basstate(ucs, 0, BS_ATRDPEND);
478 wake_up(&ucs->waitqueue); 486 wake_up(&ucs->waitqueue);
479
480 if (!ucs->rcvbuf_size) {
481 dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
482 return;
483 }
484
485 del_timer(&ucs->timer_cmd_in); 487 del_timer(&ucs->timer_cmd_in);
486 488
487 switch (status) { 489 switch (status) {
@@ -495,19 +497,10 @@ static void read_ctrl_callback(struct urb *urb)
495 numbytes = ucs->rcvbuf_size; 497 numbytes = ucs->rcvbuf_size;
496 } 498 }
497 499
498 /* copy received bytes to inbuf */ 500 /* copy received bytes to inbuf, notify event layer */
499 have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes); 501 if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) {
500 502 gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
501 if (unlikely(numbytes < ucs->rcvbuf_size)) { 503 gigaset_schedule_event(cs);
502 /* incomplete - resubmit for remaining bytes */
503 ucs->rcvbuf_size -= numbytes;
504 ucs->retry_cmd_in = 0;
505 rc = atread_submit(cs, BAS_TIMEOUT);
506 if (rc >= 0 || rc == -ENODEV)
507 /* resubmitted or disconnected */
508 /* - bypass regular exit block */
509 return;
510 error_reset(cs);
511 } 504 }
512 break; 505 break;
513 506
@@ -516,37 +509,32 @@ static void read_ctrl_callback(struct urb *urb)
516 case -EINPROGRESS: /* pending */ 509 case -EINPROGRESS: /* pending */
517 case -ENODEV: /* device removed */ 510 case -ENODEV: /* device removed */
518 case -ESHUTDOWN: /* device shut down */ 511 case -ESHUTDOWN: /* device shut down */
519 /* no action necessary */ 512 /* no further action necessary */
520 gig_dbg(DEBUG_USBREQ, "%s: %s", 513 gig_dbg(DEBUG_USBREQ, "%s: %s",
521 __func__, get_usb_statmsg(status)); 514 __func__, get_usb_statmsg(status));
522 break; 515 break;
523 516
524 default: /* severe trouble */ 517 default: /* other errors: retry */
525 dev_warn(cs->dev, "control read: %s\n",
526 get_usb_statmsg(status));
527 if (ucs->retry_cmd_in++ < BAS_RETRY) { 518 if (ucs->retry_cmd_in++ < BAS_RETRY) {
528 dev_notice(cs->dev, "control read: retry %d\n", 519 gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__,
529 ucs->retry_cmd_in); 520 get_usb_statmsg(status), ucs->retry_cmd_in);
530 rc = atread_submit(cs, BAS_TIMEOUT); 521 rc = atread_submit(cs, BAS_TIMEOUT);
531 if (rc >= 0 || rc == -ENODEV) 522 if (rc >= 0)
532 /* resubmitted or disconnected */ 523 /* successfully resubmitted, skip freeing */
533 /* - bypass regular exit block */
534 return; 524 return;
535 } else { 525 if (rc == -ENODEV)
536 dev_err(cs->dev, 526 /* disconnect, no further action necessary */
537 "control read: giving up after %d tries\n", 527 break;
538 ucs->retry_cmd_in);
539 } 528 }
529 dev_err(cs->dev, "control read: %s, giving up after %d tries\n",
530 get_usb_statmsg(status), ucs->retry_cmd_in);
540 error_reset(cs); 531 error_reset(cs);
541 } 532 }
542 533
534 /* read finished, free buffer */
543 kfree(ucs->rcvbuf); 535 kfree(ucs->rcvbuf);
544 ucs->rcvbuf = NULL; 536 ucs->rcvbuf = NULL;
545 ucs->rcvbuf_size = 0; 537 ucs->rcvbuf_size = 0;
546 if (have_data) {
547 gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
548 gigaset_schedule_event(cs);
549 }
550} 538}
551 539
552/* atread_submit 540/* atread_submit
@@ -605,14 +593,67 @@ static int atread_submit(struct cardstate *cs, int timeout)
605 593
606 if (timeout > 0) { 594 if (timeout > 0) {
607 gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout); 595 gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
608 ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10; 596 mod_timer(&ucs->timer_cmd_in, jiffies + timeout * HZ / 10);
609 ucs->timer_cmd_in.data = (unsigned long) cs;
610 ucs->timer_cmd_in.function = cmd_in_timeout;
611 add_timer(&ucs->timer_cmd_in);
612 } 597 }
613 return 0; 598 return 0;
614} 599}
615 600
601/* int_in_work
602 * workqueue routine to clear halt on interrupt in endpoint
603 */
604
605static void int_in_work(struct work_struct *work)
606{
607 struct bas_cardstate *ucs =
608 container_of(work, struct bas_cardstate, int_in_wq);
609 struct urb *urb = ucs->urb_int_in;
610 struct cardstate *cs = urb->context;
611 int rc;
612
613 /* clear halt condition */
614 rc = usb_clear_halt(ucs->udev, urb->pipe);
615 gig_dbg(DEBUG_USBREQ, "clear_halt: %s", get_usb_rcmsg(rc));
616 if (rc == 0)
617 /* success, resubmit interrupt read URB */
618 rc = usb_submit_urb(urb, GFP_ATOMIC);
619 if (rc != 0 && rc != -ENODEV) {
620 dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc));
621 rc = usb_lock_device_for_reset(ucs->udev, ucs->interface);
622 if (rc == 0) {
623 rc = usb_reset_device(ucs->udev);
624 usb_unlock_device(ucs->udev);
625 }
626 }
627 ucs->retry_int_in = 0;
628}
629
630/* int_in_resubmit
631 * timer routine for interrupt read delayed resubmit
632 * argument:
633 * controller state structure
634 */
635static void int_in_resubmit(unsigned long data)
636{
637 struct cardstate *cs = (struct cardstate *) data;
638 struct bas_cardstate *ucs = cs->hw.bas;
639 int rc;
640
641 if (ucs->retry_int_in++ >= BAS_RETRY) {
642 dev_err(cs->dev, "interrupt read: giving up after %d tries\n",
643 ucs->retry_int_in);
644 usb_queue_reset_device(ucs->interface);
645 return;
646 }
647
648 gig_dbg(DEBUG_USBREQ, "%s: retry %d", __func__, ucs->retry_int_in);
649 rc = usb_submit_urb(ucs->urb_int_in, GFP_ATOMIC);
650 if (rc != 0 && rc != -ENODEV) {
651 dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
652 get_usb_rcmsg(rc));
653 usb_queue_reset_device(ucs->interface);
654 }
655}
656
616/* read_int_callback 657/* read_int_callback
617 * USB completion handler for interrupt pipe input 658 * USB completion handler for interrupt pipe input
618 * called by the USB subsystem in interrupt context 659 * called by the USB subsystem in interrupt context
@@ -633,19 +674,29 @@ static void read_int_callback(struct urb *urb)
633 674
634 switch (status) { 675 switch (status) {
635 case 0: /* success */ 676 case 0: /* success */
677 ucs->retry_int_in = 0;
636 break; 678 break;
679 case -EPIPE: /* endpoint stalled */
680 schedule_work(&ucs->int_in_wq);
681 /* fall through */
637 case -ENOENT: /* cancelled */ 682 case -ENOENT: /* cancelled */
638 case -ECONNRESET: /* cancelled (async) */ 683 case -ECONNRESET: /* cancelled (async) */
639 case -EINPROGRESS: /* pending */ 684 case -EINPROGRESS: /* pending */
640 /* ignore silently */ 685 case -ENODEV: /* device removed */
686 case -ESHUTDOWN: /* device shut down */
687 /* no further action necessary */
641 gig_dbg(DEBUG_USBREQ, "%s: %s", 688 gig_dbg(DEBUG_USBREQ, "%s: %s",
642 __func__, get_usb_statmsg(status)); 689 __func__, get_usb_statmsg(status));
643 return; 690 return;
644 case -ENODEV: /* device removed */ 691 case -EPROTO: /* protocol error or unplug */
645 case -ESHUTDOWN: /* device shut down */ 692 case -EILSEQ:
646 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__); 693 case -ETIME:
694 /* resubmit after delay */
695 gig_dbg(DEBUG_USBREQ, "%s: %s",
696 __func__, get_usb_statmsg(status));
697 mod_timer(&ucs->timer_int_in, jiffies + HZ / 10);
647 return; 698 return;
648 default: /* severe trouble */ 699 default: /* other errors: just resubmit */
649 dev_warn(cs->dev, "interrupt read: %s\n", 700 dev_warn(cs->dev, "interrupt read: %s\n",
650 get_usb_statmsg(status)); 701 get_usb_statmsg(status));
651 goto resubmit; 702 goto resubmit;
@@ -723,6 +774,13 @@ static void read_int_callback(struct urb *urb)
723 break; 774 break;
724 } 775 }
725 spin_lock_irqsave(&cs->lock, flags); 776 spin_lock_irqsave(&cs->lock, flags);
777 if (ucs->basstate & BS_ATRDPEND) {
778 spin_unlock_irqrestore(&cs->lock, flags);
779 dev_warn(cs->dev,
780 "HD_RECEIVEATDATA_ACK(%d) during HD_READ_ATMESSAGE(%d) ignored\n",
781 l, ucs->rcvbuf_size);
782 break;
783 }
726 if (ucs->rcvbuf_size) { 784 if (ucs->rcvbuf_size) {
727 /* throw away previous buffer - we have no queue */ 785 /* throw away previous buffer - we have no queue */
728 dev_err(cs->dev, 786 dev_err(cs->dev,
@@ -735,7 +793,6 @@ static void read_int_callback(struct urb *urb)
735 if (ucs->rcvbuf == NULL) { 793 if (ucs->rcvbuf == NULL) {
736 spin_unlock_irqrestore(&cs->lock, flags); 794 spin_unlock_irqrestore(&cs->lock, flags);
737 dev_err(cs->dev, "out of memory receiving AT data\n"); 795 dev_err(cs->dev, "out of memory receiving AT data\n");
738 error_reset(cs);
739 break; 796 break;
740 } 797 }
741 ucs->rcvbuf_size = l; 798 ucs->rcvbuf_size = l;
@@ -745,13 +802,10 @@ static void read_int_callback(struct urb *urb)
745 kfree(ucs->rcvbuf); 802 kfree(ucs->rcvbuf);
746 ucs->rcvbuf = NULL; 803 ucs->rcvbuf = NULL;
747 ucs->rcvbuf_size = 0; 804 ucs->rcvbuf_size = 0;
748 if (rc != -ENODEV) {
749 spin_unlock_irqrestore(&cs->lock, flags);
750 error_reset(cs);
751 break;
752 }
753 } 805 }
754 spin_unlock_irqrestore(&cs->lock, flags); 806 spin_unlock_irqrestore(&cs->lock, flags);
807 if (rc < 0 && rc != -ENODEV)
808 error_reset(cs);
755 break; 809 break;
756 810
757 case HD_RESET_INTERRUPT_PIPE_ACK: 811 case HD_RESET_INTERRUPT_PIPE_ACK:
@@ -818,6 +872,7 @@ static void read_iso_callback(struct urb *urb)
818 tasklet_hi_schedule(&ubc->rcvd_tasklet); 872 tasklet_hi_schedule(&ubc->rcvd_tasklet);
819 } else { 873 } else {
820 /* tasklet still busy, drop data and resubmit URB */ 874 /* tasklet still busy, drop data and resubmit URB */
875 gig_dbg(DEBUG_ISO, "%s: overrun", __func__);
821 ubc->loststatus = status; 876 ubc->loststatus = status;
822 for (i = 0; i < BAS_NUMFRAMES; i++) { 877 for (i = 0; i < BAS_NUMFRAMES; i++) {
823 ubc->isoinlost += urb->iso_frame_desc[i].actual_length; 878 ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
@@ -833,13 +888,11 @@ static void read_iso_callback(struct urb *urb)
833 urb->dev = bcs->cs->hw.bas->udev; 888 urb->dev = bcs->cs->hw.bas->udev;
834 urb->transfer_flags = URB_ISO_ASAP; 889 urb->transfer_flags = URB_ISO_ASAP;
835 urb->number_of_packets = BAS_NUMFRAMES; 890 urb->number_of_packets = BAS_NUMFRAMES;
836 gig_dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit",
837 __func__);
838 rc = usb_submit_urb(urb, GFP_ATOMIC); 891 rc = usb_submit_urb(urb, GFP_ATOMIC);
839 if (unlikely(rc != 0 && rc != -ENODEV)) { 892 if (unlikely(rc != 0 && rc != -ENODEV)) {
840 dev_err(bcs->cs->dev, 893 dev_err(bcs->cs->dev,
841 "could not resubmit isochronous read " 894 "could not resubmit isoc read URB: %s\n",
842 "URB: %s\n", get_usb_rcmsg(rc)); 895 get_usb_rcmsg(rc));
843 dump_urb(DEBUG_ISO, "isoc read", urb); 896 dump_urb(DEBUG_ISO, "isoc read", urb);
844 error_hangup(bcs); 897 error_hangup(bcs);
845 } 898 }
@@ -1081,7 +1134,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1081 gig_dbg(DEBUG_ISO, "%s: disconnected", __func__); 1134 gig_dbg(DEBUG_ISO, "%s: disconnected", __func__);
1082 else 1135 else
1083 dev_err(ucx->bcs->cs->dev, 1136 dev_err(ucx->bcs->cs->dev,
1084 "could not submit isochronous write URB: %s\n", 1137 "could not submit isoc write URB: %s\n",
1085 get_usb_rcmsg(rc)); 1138 get_usb_rcmsg(rc));
1086 return rc; 1139 return rc;
1087 } 1140 }
@@ -1126,7 +1179,7 @@ static void write_iso_tasklet(unsigned long data)
1126 ubc->isooutovfl = NULL; 1179 ubc->isooutovfl = NULL;
1127 spin_unlock_irqrestore(&ubc->isooutlock, flags); 1180 spin_unlock_irqrestore(&ubc->isooutlock, flags);
1128 if (ovfl) { 1181 if (ovfl) {
1129 dev_err(cs->dev, "isochronous write buffer underrun\n"); 1182 dev_err(cs->dev, "isoc write underrun\n");
1130 error_hangup(bcs); 1183 error_hangup(bcs);
1131 break; 1184 break;
1132 } 1185 }
@@ -1151,7 +1204,7 @@ static void write_iso_tasklet(unsigned long data)
1151 if (next) { 1204 if (next) {
1152 /* couldn't put it back */ 1205 /* couldn't put it back */
1153 dev_err(cs->dev, 1206 dev_err(cs->dev,
1154 "losing isochronous write URB\n"); 1207 "losing isoc write URB\n");
1155 error_hangup(bcs); 1208 error_hangup(bcs);
1156 } 1209 }
1157 } 1210 }
@@ -1178,10 +1231,10 @@ static void write_iso_tasklet(unsigned long data)
1178 if (ifd->status || 1231 if (ifd->status ||
1179 ifd->actual_length != ifd->length) { 1232 ifd->actual_length != ifd->length) {
1180 dev_warn(cs->dev, 1233 dev_warn(cs->dev,
1181 "isochronous write: frame %d: %s, " 1234 "isoc write: frame %d[%d/%d]: %s\n",
1182 "only %d of %d bytes sent\n", 1235 i, ifd->actual_length,
1183 i, get_usb_statmsg(ifd->status), 1236 ifd->length,
1184 ifd->actual_length, ifd->length); 1237 get_usb_statmsg(ifd->status));
1185 offset = (ifd->offset + 1238 offset = (ifd->offset +
1186 ifd->actual_length) 1239 ifd->actual_length)
1187 % BAS_OUTBUFSIZE; 1240 % BAS_OUTBUFSIZE;
@@ -1190,11 +1243,11 @@ static void write_iso_tasklet(unsigned long data)
1190 } 1243 }
1191 break; 1244 break;
1192 case -EPIPE: /* stall - probably underrun */ 1245 case -EPIPE: /* stall - probably underrun */
1193 dev_err(cs->dev, "isochronous write stalled\n"); 1246 dev_err(cs->dev, "isoc write: stalled\n");
1194 error_hangup(bcs); 1247 error_hangup(bcs);
1195 break; 1248 break;
1196 default: /* severe trouble */ 1249 default: /* other errors */
1197 dev_warn(cs->dev, "isochronous write: %s\n", 1250 dev_warn(cs->dev, "isoc write: %s\n",
1198 get_usb_statmsg(status)); 1251 get_usb_statmsg(status));
1199 } 1252 }
1200 1253
@@ -1250,6 +1303,7 @@ static void read_iso_tasklet(unsigned long data)
1250 struct cardstate *cs = bcs->cs; 1303 struct cardstate *cs = bcs->cs;
1251 struct urb *urb; 1304 struct urb *urb;
1252 int status; 1305 int status;
1306 struct usb_iso_packet_descriptor *ifd;
1253 char *rcvbuf; 1307 char *rcvbuf;
1254 unsigned long flags; 1308 unsigned long flags;
1255 int totleft, numbytes, offset, frame, rc; 1309 int totleft, numbytes, offset, frame, rc;
@@ -1267,8 +1321,7 @@ static void read_iso_tasklet(unsigned long data)
1267 ubc->isoindone = NULL; 1321 ubc->isoindone = NULL;
1268 if (unlikely(ubc->loststatus != -EINPROGRESS)) { 1322 if (unlikely(ubc->loststatus != -EINPROGRESS)) {
1269 dev_warn(cs->dev, 1323 dev_warn(cs->dev,
1270 "isochronous read overrun, " 1324 "isoc read overrun, URB dropped (status: %s, %d bytes)\n",
1271 "dropped URB with status: %s, %d bytes lost\n",
1272 get_usb_statmsg(ubc->loststatus), 1325 get_usb_statmsg(ubc->loststatus),
1273 ubc->isoinlost); 1326 ubc->isoinlost);
1274 ubc->loststatus = -EINPROGRESS; 1327 ubc->loststatus = -EINPROGRESS;
@@ -1298,11 +1351,11 @@ static void read_iso_tasklet(unsigned long data)
1298 __func__, get_usb_statmsg(status)); 1351 __func__, get_usb_statmsg(status));
1299 continue; /* -> skip */ 1352 continue; /* -> skip */
1300 case -EPIPE: 1353 case -EPIPE:
1301 dev_err(cs->dev, "isochronous read stalled\n"); 1354 dev_err(cs->dev, "isoc read: stalled\n");
1302 error_hangup(bcs); 1355 error_hangup(bcs);
1303 continue; /* -> skip */ 1356 continue; /* -> skip */
1304 default: /* severe trouble */ 1357 default: /* other error */
1305 dev_warn(cs->dev, "isochronous read: %s\n", 1358 dev_warn(cs->dev, "isoc read: %s\n",
1306 get_usb_statmsg(status)); 1359 get_usb_statmsg(status));
1307 goto error; 1360 goto error;
1308 } 1361 }
@@ -1310,40 +1363,52 @@ static void read_iso_tasklet(unsigned long data)
1310 rcvbuf = urb->transfer_buffer; 1363 rcvbuf = urb->transfer_buffer;
1311 totleft = urb->actual_length; 1364 totleft = urb->actual_length;
1312 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { 1365 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
1313 numbytes = urb->iso_frame_desc[frame].actual_length; 1366 ifd = &urb->iso_frame_desc[frame];
1314 if (unlikely(urb->iso_frame_desc[frame].status)) 1367 numbytes = ifd->actual_length;
1368 switch (ifd->status) {
1369 case 0: /* success */
1370 break;
1371 case -EPROTO: /* protocol error or unplug */
1372 case -EILSEQ:
1373 case -ETIME:
1374 /* probably just disconnected, ignore */
1375 gig_dbg(DEBUG_ISO,
1376 "isoc read: frame %d[%d]: %s\n",
1377 frame, numbytes,
1378 get_usb_statmsg(ifd->status));
1379 break;
1380 default: /* other error */
1381 /* report, assume transferred bytes are ok */
1315 dev_warn(cs->dev, 1382 dev_warn(cs->dev,
1316 "isochronous read: frame %d[%d]: %s\n", 1383 "isoc read: frame %d[%d]: %s\n",
1317 frame, numbytes, 1384 frame, numbytes,
1318 get_usb_statmsg( 1385 get_usb_statmsg(ifd->status));
1319 urb->iso_frame_desc[frame].status)); 1386 }
1320 if (unlikely(numbytes > BAS_MAXFRAME)) 1387 if (unlikely(numbytes > BAS_MAXFRAME))
1321 dev_warn(cs->dev, 1388 dev_warn(cs->dev,
1322 "isochronous read: frame %d: " 1389 "isoc read: frame %d[%d]: %s\n",
1323 "numbytes (%d) > BAS_MAXFRAME\n", 1390 frame, numbytes,
1324 frame, numbytes); 1391 "exceeds max frame size");
1325 if (unlikely(numbytes > totleft)) { 1392 if (unlikely(numbytes > totleft)) {
1326 dev_warn(cs->dev, 1393 dev_warn(cs->dev,
1327 "isochronous read: frame %d: " 1394 "isoc read: frame %d[%d]: %s\n",
1328 "numbytes (%d) > totleft (%d)\n", 1395 frame, numbytes,
1329 frame, numbytes, totleft); 1396 "exceeds total transfer length");
1330 numbytes = totleft; 1397 numbytes = totleft;
1331 } 1398 }
1332 offset = urb->iso_frame_desc[frame].offset; 1399 offset = ifd->offset;
1333 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { 1400 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
1334 dev_warn(cs->dev, 1401 dev_warn(cs->dev,
1335 "isochronous read: frame %d: " 1402 "isoc read: frame %d[%d]: %s\n",
1336 "offset (%d) + numbytes (%d) " 1403 frame, numbytes,
1337 "> BAS_INBUFSIZE\n", 1404 "exceeds end of buffer");
1338 frame, offset, numbytes);
1339 numbytes = BAS_INBUFSIZE - offset; 1405 numbytes = BAS_INBUFSIZE - offset;
1340 } 1406 }
1341 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); 1407 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
1342 totleft -= numbytes; 1408 totleft -= numbytes;
1343 } 1409 }
1344 if (unlikely(totleft > 0)) 1410 if (unlikely(totleft > 0))
1345 dev_warn(cs->dev, 1411 dev_warn(cs->dev, "isoc read: %d data bytes missing\n",
1346 "isochronous read: %d data bytes missing\n",
1347 totleft); 1412 totleft);
1348 1413
1349error: 1414error:
@@ -1359,9 +1424,9 @@ error:
1359 rc = usb_submit_urb(urb, GFP_ATOMIC); 1424 rc = usb_submit_urb(urb, GFP_ATOMIC);
1360 if (unlikely(rc != 0 && rc != -ENODEV)) { 1425 if (unlikely(rc != 0 && rc != -ENODEV)) {
1361 dev_err(cs->dev, 1426 dev_err(cs->dev,
1362 "could not resubmit isochronous read URB: %s\n", 1427 "could not resubmit isoc read URB: %s\n",
1363 get_usb_rcmsg(rc)); 1428 get_usb_rcmsg(rc));
1364 dump_urb(DEBUG_ISO, "resubmit iso read", urb); 1429 dump_urb(DEBUG_ISO, "resubmit isoc read", urb);
1365 error_hangup(bcs); 1430 error_hangup(bcs);
1366 } 1431 }
1367 } 1432 }
@@ -1373,12 +1438,12 @@ error:
1373/* req_timeout 1438/* req_timeout
1374 * timeout routine for control output request 1439 * timeout routine for control output request
1375 * argument: 1440 * argument:
1376 * B channel control structure 1441 * controller state structure
1377 */ 1442 */
1378static void req_timeout(unsigned long data) 1443static void req_timeout(unsigned long data)
1379{ 1444{
1380 struct bc_state *bcs = (struct bc_state *) data; 1445 struct cardstate *cs = (struct cardstate *) data;
1381 struct bas_cardstate *ucs = bcs->cs->hw.bas; 1446 struct bas_cardstate *ucs = cs->hw.bas;
1382 int pending; 1447 int pending;
1383 unsigned long flags; 1448 unsigned long flags;
1384 1449
@@ -1395,38 +1460,44 @@ static void req_timeout(unsigned long data)
1395 break; 1460 break;
1396 1461
1397 case HD_OPEN_ATCHANNEL: 1462 case HD_OPEN_ATCHANNEL:
1398 dev_err(bcs->cs->dev, "timeout opening AT channel\n"); 1463 dev_err(cs->dev, "timeout opening AT channel\n");
1399 error_reset(bcs->cs); 1464 error_reset(cs);
1400 break; 1465 break;
1401 1466
1402 case HD_OPEN_B2CHANNEL:
1403 case HD_OPEN_B1CHANNEL: 1467 case HD_OPEN_B1CHANNEL:
1404 dev_err(bcs->cs->dev, "timeout opening channel %d\n", 1468 dev_err(cs->dev, "timeout opening channel 1\n");
1405 bcs->channel + 1); 1469 error_hangup(&cs->bcs[0]);
1406 error_hangup(bcs); 1470 break;
1471
1472 case HD_OPEN_B2CHANNEL:
1473 dev_err(cs->dev, "timeout opening channel 2\n");
1474 error_hangup(&cs->bcs[1]);
1407 break; 1475 break;
1408 1476
1409 case HD_CLOSE_ATCHANNEL: 1477 case HD_CLOSE_ATCHANNEL:
1410 dev_err(bcs->cs->dev, "timeout closing AT channel\n"); 1478 dev_err(cs->dev, "timeout closing AT channel\n");
1411 error_reset(bcs->cs); 1479 error_reset(cs);
1412 break; 1480 break;
1413 1481
1414 case HD_CLOSE_B2CHANNEL:
1415 case HD_CLOSE_B1CHANNEL: 1482 case HD_CLOSE_B1CHANNEL:
1416 dev_err(bcs->cs->dev, "timeout closing channel %d\n", 1483 dev_err(cs->dev, "timeout closing channel 1\n");
1417 bcs->channel + 1); 1484 error_reset(cs);
1418 error_reset(bcs->cs); 1485 break;
1486
1487 case HD_CLOSE_B2CHANNEL:
1488 dev_err(cs->dev, "timeout closing channel 2\n");
1489 error_reset(cs);
1419 break; 1490 break;
1420 1491
1421 case HD_RESET_INTERRUPT_PIPE: 1492 case HD_RESET_INTERRUPT_PIPE:
1422 /* error recovery escalation */ 1493 /* error recovery escalation */
1423 dev_err(bcs->cs->dev, 1494 dev_err(cs->dev,
1424 "reset interrupt pipe timeout, attempting USB reset\n"); 1495 "reset interrupt pipe timeout, attempting USB reset\n");
1425 usb_queue_reset_device(bcs->cs->hw.bas->interface); 1496 usb_queue_reset_device(ucs->interface);
1426 break; 1497 break;
1427 1498
1428 default: 1499 default:
1429 dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", 1500 dev_warn(cs->dev, "request 0x%02x timed out, clearing\n",
1430 pending); 1501 pending);
1431 } 1502 }
1432 1503
@@ -1557,10 +1628,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
1557 1628
1558 if (timeout > 0) { 1629 if (timeout > 0) {
1559 gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout); 1630 gig_dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
1560 ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10; 1631 mod_timer(&ucs->timer_ctrl, jiffies + timeout * HZ / 10);
1561 ucs->timer_ctrl.data = (unsigned long) bcs;
1562 ucs->timer_ctrl.function = req_timeout;
1563 add_timer(&ucs->timer_ctrl);
1564 } 1632 }
1565 1633
1566 spin_unlock_irqrestore(&ucs->lock, flags); 1634 spin_unlock_irqrestore(&ucs->lock, flags);
@@ -1590,21 +1658,20 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1590 1658
1591 if (cs->hw.bas->basstate & BS_SUSPEND) { 1659 if (cs->hw.bas->basstate & BS_SUSPEND) {
1592 dev_notice(cs->dev, 1660 dev_notice(cs->dev,
1593 "not starting isochronous I/O, " 1661 "not starting isoc I/O, suspend in progress\n");
1594 "suspend in progress\n");
1595 spin_unlock_irqrestore(&cs->lock, flags); 1662 spin_unlock_irqrestore(&cs->lock, flags);
1596 return -EHOSTUNREACH; 1663 return -EHOSTUNREACH;
1597 } 1664 }
1598 1665
1599 ret = starturbs(bcs); 1666 ret = starturbs(bcs);
1600 if (ret < 0) { 1667 if (ret < 0) {
1668 spin_unlock_irqrestore(&cs->lock, flags);
1601 dev_err(cs->dev, 1669 dev_err(cs->dev,
1602 "could not start isochronous I/O for channel B%d: %s\n", 1670 "could not start isoc I/O for channel B%d: %s\n",
1603 bcs->channel + 1, 1671 bcs->channel + 1,
1604 ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret)); 1672 ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
1605 if (ret != -ENODEV) 1673 if (ret != -ENODEV)
1606 error_hangup(bcs); 1674 error_hangup(bcs);
1607 spin_unlock_irqrestore(&cs->lock, flags);
1608 return ret; 1675 return ret;
1609 } 1676 }
1610 1677
@@ -1614,11 +1681,11 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1614 dev_err(cs->dev, "could not open channel B%d\n", 1681 dev_err(cs->dev, "could not open channel B%d\n",
1615 bcs->channel + 1); 1682 bcs->channel + 1);
1616 stopurbs(bcs->hw.bas); 1683 stopurbs(bcs->hw.bas);
1617 if (ret != -ENODEV)
1618 error_hangup(bcs);
1619 } 1684 }
1620 1685
1621 spin_unlock_irqrestore(&cs->lock, flags); 1686 spin_unlock_irqrestore(&cs->lock, flags);
1687 if (ret < 0 && ret != -ENODEV)
1688 error_hangup(bcs);
1622 return ret; 1689 return ret;
1623} 1690}
1624 1691
@@ -1826,10 +1893,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
1826 if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) { 1893 if (!(update_basstate(ucs, BS_ATTIMER, BS_ATREADY) & BS_ATTIMER)) {
1827 gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs", 1894 gig_dbg(DEBUG_OUTPUT, "setting ATREADY timeout of %d/10 secs",
1828 ATRDY_TIMEOUT); 1895 ATRDY_TIMEOUT);
1829 ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10; 1896 mod_timer(&ucs->timer_atrdy, jiffies + ATRDY_TIMEOUT * HZ / 10);
1830 ucs->timer_atrdy.data = (unsigned long) cs;
1831 ucs->timer_atrdy.function = atrdy_timeout;
1832 add_timer(&ucs->timer_atrdy);
1833 } 1897 }
1834 return 0; 1898 return 0;
1835} 1899}
@@ -1914,6 +1978,28 @@ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb)
1914 * The next command will reopen the AT channel automatically. 1978 * The next command will reopen the AT channel automatically.
1915 */ 1979 */
1916 if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { 1980 if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) {
1981 /* If an HD_RECEIVEATDATA_ACK message remains unhandled
1982 * because of an error, the base never sends another one.
1983 * The response channel is thus effectively blocked.
1984 * Closing and reopening the AT channel does *not* clear
1985 * this condition.
1986 * As a stopgap measure, submit a zero-length AT read
1987 * before closing the AT channel. This has the undocumented
1988 * effect of triggering a new HD_RECEIVEATDATA_ACK message
1989 * from the base if necessary.
1990 * The subsequent AT channel close then discards any pending
1991 * messages.
1992 */
1993 spin_lock_irqsave(&cs->lock, flags);
1994 if (!(cs->hw.bas->basstate & BS_ATRDPEND)) {
1995 kfree(cs->hw.bas->rcvbuf);
1996 cs->hw.bas->rcvbuf = NULL;
1997 cs->hw.bas->rcvbuf_size = 0;
1998 cs->hw.bas->retry_cmd_in = 0;
1999 atread_submit(cs, 0);
2000 }
2001 spin_unlock_irqrestore(&cs->lock, flags);
2002
1917 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); 2003 rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT);
1918 if (cb->wake_tasklet) 2004 if (cb->wake_tasklet)
1919 tasklet_schedule(cb->wake_tasklet); 2005 tasklet_schedule(cb->wake_tasklet);
@@ -2010,7 +2096,7 @@ static int gigaset_freebcshw(struct bc_state *bcs)
2010 2096
2011 /* kill URBs and tasklets before freeing - better safe than sorry */ 2097 /* kill URBs and tasklets before freeing - better safe than sorry */
2012 ubc->running = 0; 2098 ubc->running = 0;
2013 gig_dbg(DEBUG_INIT, "%s: killing iso URBs", __func__); 2099 gig_dbg(DEBUG_INIT, "%s: killing isoc URBs", __func__);
2014 for (i = 0; i < BAS_OUTURBS; ++i) { 2100 for (i = 0; i < BAS_OUTURBS; ++i) {
2015 usb_kill_urb(ubc->isoouturbs[i].urb); 2101 usb_kill_urb(ubc->isoouturbs[i].urb);
2016 usb_free_urb(ubc->isoouturbs[i].urb); 2102 usb_free_urb(ubc->isoouturbs[i].urb);
@@ -2131,10 +2217,12 @@ static int gigaset_initcshw(struct cardstate *cs)
2131 ucs->pending = 0; 2217 ucs->pending = 0;
2132 2218
2133 ucs->basstate = 0; 2219 ucs->basstate = 0;
2134 init_timer(&ucs->timer_ctrl); 2220 setup_timer(&ucs->timer_ctrl, req_timeout, (unsigned long) cs);
2135 init_timer(&ucs->timer_atrdy); 2221 setup_timer(&ucs->timer_atrdy, atrdy_timeout, (unsigned long) cs);
2136 init_timer(&ucs->timer_cmd_in); 2222 setup_timer(&ucs->timer_cmd_in, cmd_in_timeout, (unsigned long) cs);
2223 setup_timer(&ucs->timer_int_in, int_in_resubmit, (unsigned long) cs);
2137 init_waitqueue_head(&ucs->waitqueue); 2224 init_waitqueue_head(&ucs->waitqueue);
2225 INIT_WORK(&ucs->int_in_wq, int_in_work);
2138 2226
2139 return 1; 2227 return 1;
2140} 2228}
@@ -2282,6 +2370,7 @@ static int gigaset_probe(struct usb_interface *interface,
2282 get_usb_rcmsg(rc)); 2370 get_usb_rcmsg(rc));
2283 goto error; 2371 goto error;
2284 } 2372 }
2373 ucs->retry_int_in = 0;
2285 2374
2286 /* tell the device that the driver is ready */ 2375 /* tell the device that the driver is ready */
2287 rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0); 2376 rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
@@ -2334,10 +2423,12 @@ static void gigaset_disconnect(struct usb_interface *interface)
2334 /* stop driver (common part) */ 2423 /* stop driver (common part) */
2335 gigaset_stop(cs); 2424 gigaset_stop(cs);
2336 2425
2337 /* stop timers and URBs, free ressources */ 2426 /* stop delayed work and URBs, free ressources */
2338 del_timer_sync(&ucs->timer_ctrl); 2427 del_timer_sync(&ucs->timer_ctrl);
2339 del_timer_sync(&ucs->timer_atrdy); 2428 del_timer_sync(&ucs->timer_atrdy);
2340 del_timer_sync(&ucs->timer_cmd_in); 2429 del_timer_sync(&ucs->timer_cmd_in);
2430 del_timer_sync(&ucs->timer_int_in);
2431 cancel_work_sync(&ucs->int_in_wq);
2341 freeurbs(cs); 2432 freeurbs(cs);
2342 usb_set_intfdata(interface, NULL); 2433 usb_set_intfdata(interface, NULL);
2343 kfree(ucs->rcvbuf); 2434 kfree(ucs->rcvbuf);
@@ -2400,10 +2491,14 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
2400 /* in case of timeout, proceed anyway */ 2491 /* in case of timeout, proceed anyway */
2401 } 2492 }
2402 2493
2403 /* kill all URBs and timers that might still be pending */ 2494 /* kill all URBs and delayed work that might still be pending */
2404 usb_kill_urb(ucs->urb_ctrl); 2495 usb_kill_urb(ucs->urb_ctrl);
2405 usb_kill_urb(ucs->urb_int_in); 2496 usb_kill_urb(ucs->urb_int_in);
2406 del_timer_sync(&ucs->timer_ctrl); 2497 del_timer_sync(&ucs->timer_ctrl);
2498 del_timer_sync(&ucs->timer_atrdy);
2499 del_timer_sync(&ucs->timer_cmd_in);
2500 del_timer_sync(&ucs->timer_int_in);
2501 cancel_work_sync(&ucs->int_in_wq);
2407 2502
2408 gig_dbg(DEBUG_SUSPEND, "suspend complete"); 2503 gig_dbg(DEBUG_SUSPEND, "suspend complete");
2409 return 0; 2504 return 0;
@@ -2425,6 +2520,7 @@ static int gigaset_resume(struct usb_interface *intf)
2425 get_usb_rcmsg(rc)); 2520 get_usb_rcmsg(rc));
2426 return rc; 2521 return rc;
2427 } 2522 }
2523 ucs->retry_int_in = 0;
2428 2524
2429 /* clear suspend flag to reallow activity */ 2525 /* clear suspend flag to reallow activity */
2430 update_basstate(ucs, 0, BS_SUSPEND); 2526 update_basstate(ucs, 0, BS_SUSPEND);
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 3ca561eccd9f..db621db67f61 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -1026,32 +1026,6 @@ struct cardstate *gigaset_get_cs_by_id(int id)
1026 return ret; 1026 return ret;
1027} 1027}
1028 1028
1029void gigaset_debugdrivers(void)
1030{
1031 unsigned long flags;
1032 static struct cardstate *cs;
1033 struct gigaset_driver *drv;
1034 unsigned i;
1035
1036 spin_lock_irqsave(&driver_lock, flags);
1037 list_for_each_entry(drv, &drivers, list) {
1038 gig_dbg(DEBUG_DRIVER, "driver %p", drv);
1039 spin_lock(&drv->lock);
1040 for (i = 0; i < drv->minors; ++i) {
1041 gig_dbg(DEBUG_DRIVER, " index %u", i);
1042 cs = drv->cs + i;
1043 gig_dbg(DEBUG_DRIVER, " cardstate %p", cs);
1044 gig_dbg(DEBUG_DRIVER, " flags 0x%02x", cs->flags);
1045 gig_dbg(DEBUG_DRIVER, " minor_index %u",
1046 cs->minor_index);
1047 gig_dbg(DEBUG_DRIVER, " driver %p", cs->driver);
1048 gig_dbg(DEBUG_DRIVER, " i4l id %d", cs->myid);
1049 }
1050 spin_unlock(&drv->lock);
1051 }
1052 spin_unlock_irqrestore(&driver_lock, flags);
1053}
1054
1055static struct cardstate *gigaset_get_cs_by_minor(unsigned minor) 1029static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
1056{ 1030{
1057 unsigned long flags; 1031 unsigned long flags;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index a69512fb1195..6dd360734cfd 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -70,7 +70,6 @@ enum debuglevel {
70 DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */ 70 DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
71 DEBUG_LLDATA = 0x00100, /* sent/received LL data */ 71 DEBUG_LLDATA = 0x00100, /* sent/received LL data */
72 DEBUG_EVENT = 0x00200, /* event processing */ 72 DEBUG_EVENT = 0x00200, /* event processing */
73 DEBUG_DRIVER = 0x00400, /* driver structure */
74 DEBUG_HDLC = 0x00800, /* M10x HDLC processing */ 73 DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
75 DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */ 74 DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */
76 DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */ 75 DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */
@@ -727,7 +726,7 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
727 726
728/* Deallocate driver structure. */ 727/* Deallocate driver structure. */
729void gigaset_freedriver(struct gigaset_driver *drv); 728void gigaset_freedriver(struct gigaset_driver *drv);
730void gigaset_debugdrivers(void); 729
731struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty); 730struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
732struct cardstate *gigaset_get_cs_by_id(int id); 731struct cardstate *gigaset_get_cs_by_id(int id);
733void gigaset_blockdriver(struct gigaset_driver *drv); 732void gigaset_blockdriver(struct gigaset_driver *drv);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 34bca37d65b9..9bec8b969964 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -201,8 +201,6 @@ static int command_from_LL(isdn_ctrl *cntrl)
201 int i; 201 int i;
202 size_t l; 202 size_t l;
203 203
204 gigaset_debugdrivers();
205
206 gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx", 204 gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx",
207 cntrl->driver, cntrl->command, cntrl->arg); 205 cntrl->driver, cntrl->command, cntrl->arg);
208 206
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 2dfd346fc889..f39ccdf87a17 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -842,13 +842,14 @@ static inline void trans_receive(unsigned char *src, unsigned count,
842 842
843 if (unlikely(bcs->ignore)) { 843 if (unlikely(bcs->ignore)) {
844 bcs->ignore--; 844 bcs->ignore--;
845 hdlc_flush(bcs);
846 return; 845 return;
847 } 846 }
848 skb = bcs->rx_skb; 847 skb = bcs->rx_skb;
849 if (skb == NULL) 848 if (skb == NULL) {
850 skb = gigaset_new_rx_skb(bcs); 849 skb = gigaset_new_rx_skb(bcs);
851 bcs->hw.bas->goodbytes += skb->len; 850 if (skb == NULL)
851 return;
852 }
852 dobytes = bcs->rx_bufsize - skb->len; 853 dobytes = bcs->rx_bufsize - skb->len;
853 while (count > 0) { 854 while (count > 0) {
854 dst = skb_put(skb, count < dobytes ? count : dobytes); 855 dst = skb_put(skb, count < dobytes ? count : dobytes);
@@ -860,6 +861,7 @@ static inline void trans_receive(unsigned char *src, unsigned count,
860 if (dobytes == 0) { 861 if (dobytes == 0) {
861 dump_bytes(DEBUG_STREAM_DUMP, 862 dump_bytes(DEBUG_STREAM_DUMP,
862 "rcv data", skb->data, skb->len); 863 "rcv data", skb->data, skb->len);
864 bcs->hw.bas->goodbytes += skb->len;
863 gigaset_skb_rcvd(bcs, skb); 865 gigaset_skb_rcvd(bcs, skb);
864 skb = gigaset_new_rx_skb(bcs); 866 skb = gigaset_new_rx_skb(bcs);
865 if (skb == NULL) 867 if (skb == NULL)
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index 33ce89eed65b..362640120886 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -862,7 +862,7 @@ void diva_mnt_add_xdi_adapter (const DESCRIPTOR* d) {
862 diva_os_spin_lock_magic_t old_irql, old_irql1; 862 diva_os_spin_lock_magic_t old_irql, old_irql1;
863 dword sec, usec, logical, serial, org_mask; 863 dword sec, usec, logical, serial, org_mask;
864 int id, best_id = 0, free_id = -1; 864 int id, best_id = 0, free_id = -1;
865 char tmp[256]; 865 char tmp[128];
866 diva_dbg_entry_head_t* pmsg = NULL; 866 diva_dbg_entry_head_t* pmsg = NULL;
867 int len; 867 int len;
868 word size; 868 word size;
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
index 8ea587783e14..02eed6b4354c 100644
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ b/drivers/isdn/hardware/eicon/debuglib.h
@@ -249,7 +249,7 @@ typedef struct _DbgHandle_
249 } regTime ; /* timestamp for registration */ 249 } regTime ; /* timestamp for registration */
250 void *pIrp ; /* ptr to pending i/o request */ 250 void *pIrp ; /* ptr to pending i/o request */
251 unsigned long dbgMask ; /* current debug mask */ 251 unsigned long dbgMask ; /* current debug mask */
252 char drvName[16] ; /* ASCII name of registered driver */ 252 char drvName[128] ; /* ASCII name of registered driver */
253 char drvTag[64] ; /* revision string */ 253 char drvTag[64] ; /* revision string */
254 DbgEnd dbg_end ; /* function for debug closing */ 254 DbgEnd dbg_end ; /* function for debug closing */
255 DbgLog dbg_prt ; /* function for debug appending */ 255 DbgLog dbg_prt ; /* function for debug appending */
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index f013ee15327c..c463162843ba 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -14,7 +14,7 @@
14#include <linux/isdn.h> 14#include <linux/isdn.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/smp_lock.h> 17#include <linux/mutex.h>
18#include "isdn_common.h" 18#include "isdn_common.h"
19#include "isdn_tty.h" 19#include "isdn_tty.h"
20#ifdef CONFIG_ISDN_AUDIO 20#ifdef CONFIG_ISDN_AUDIO
@@ -28,6 +28,7 @@
28 28
29/* Prototypes */ 29/* Prototypes */
30 30
31static DEFINE_MUTEX(modem_info_mutex);
31static int isdn_tty_edit_at(const char *, int, modem_info *); 32static int isdn_tty_edit_at(const char *, int, modem_info *);
32static void isdn_tty_check_esc(const u_char *, u_char, int, int *, u_long *); 33static void isdn_tty_check_esc(const u_char *, u_char, int, int *, u_long *);
33static void isdn_tty_modem_reset_regs(modem_info *, int); 34static void isdn_tty_modem_reset_regs(modem_info *, int);
@@ -1354,14 +1355,14 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file)
1354 if (tty->flags & (1 << TTY_IO_ERROR)) 1355 if (tty->flags & (1 << TTY_IO_ERROR))
1355 return -EIO; 1356 return -EIO;
1356 1357
1357 lock_kernel(); 1358 mutex_lock(&modem_info_mutex);
1358#ifdef ISDN_DEBUG_MODEM_IOCTL 1359#ifdef ISDN_DEBUG_MODEM_IOCTL
1359 printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line); 1360 printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
1360#endif 1361#endif
1361 1362
1362 control = info->mcr; 1363 control = info->mcr;
1363 status = info->msr; 1364 status = info->msr;
1364 unlock_kernel(); 1365 mutex_unlock(&modem_info_mutex);
1365 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) 1366 return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
1366 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) 1367 | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
1367 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) 1368 | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
@@ -1385,7 +1386,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
1385 printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear); 1386 printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear);
1386#endif 1387#endif
1387 1388
1388 lock_kernel(); 1389 mutex_lock(&modem_info_mutex);
1389 if (set & TIOCM_RTS) 1390 if (set & TIOCM_RTS)
1390 info->mcr |= UART_MCR_RTS; 1391 info->mcr |= UART_MCR_RTS;
1391 if (set & TIOCM_DTR) { 1392 if (set & TIOCM_DTR) {
@@ -1407,7 +1408,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
1407 isdn_tty_modem_hup(info, 1); 1408 isdn_tty_modem_hup(info, 1);
1408 } 1409 }
1409 } 1410 }
1410 unlock_kernel(); 1411 mutex_unlock(&modem_info_mutex);
1411 return 0; 1412 return 0;
1412} 1413}
1413 1414
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index b159bd59e64e..a5b632e67552 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,7 +18,6 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/mISDNif.h> 19#include <linux/mISDNif.h>
20#include <linux/kthread.h> 20#include <linux/kthread.h>
21#include <linux/smp_lock.h>
22#include "core.h" 21#include "core.h"
23 22
24static u_int *debug; 23static u_int *debug;
@@ -205,13 +204,7 @@ mISDNStackd(void *data)
205 struct mISDNstack *st = data; 204 struct mISDNstack *st = data;
206 int err = 0; 205 int err = 0;
207 206
208#ifdef CONFIG_SMP
209 lock_kernel();
210#endif
211 sigfillset(&current->blocked); 207 sigfillset(&current->blocked);
212#ifdef CONFIG_SMP
213 unlock_kernel();
214#endif
215 if (*debug & DEBUG_MSG_THREAD) 208 if (*debug & DEBUG_MSG_THREAD)
216 printk(KERN_DEBUG "mISDNStackd %s started\n", 209 printk(KERN_DEBUG "mISDNStackd %s started\n",
217 dev_name(&st->dev->dev)); 210 dev_name(&st->dev->dev));
diff --git a/drivers/isdn/pcbit/edss1.c b/drivers/isdn/pcbit/edss1.c
index d5920ae22d73..80c9c16fd5ef 100644
--- a/drivers/isdn/pcbit/edss1.c
+++ b/drivers/isdn/pcbit/edss1.c
@@ -33,7 +33,7 @@
33#include "callbacks.h" 33#include "callbacks.h"
34 34
35 35
36char * isdn_state_table[] = { 36const char * const isdn_state_table[] = {
37 "Closed", 37 "Closed",
38 "Call initiated", 38 "Call initiated",
39 "Overlap sending", 39 "Overlap sending",
diff --git a/drivers/isdn/pcbit/edss1.h b/drivers/isdn/pcbit/edss1.h
index 0b64f97015d8..39f8346e28c5 100644
--- a/drivers/isdn/pcbit/edss1.h
+++ b/drivers/isdn/pcbit/edss1.h
@@ -90,7 +90,7 @@ struct fsm_timer_entry {
90 unsigned long timeout; /* in seconds */ 90 unsigned long timeout; /* in seconds */
91}; 91};
92 92
93extern char * isdn_state_table[]; 93extern const char * const isdn_state_table[];
94 94
95void pcbit_fsm_event(struct pcbit_dev *, struct pcbit_chan *, 95void pcbit_fsm_event(struct pcbit_dev *, struct pcbit_chan *,
96 unsigned short event, struct callb_data *); 96 unsigned short event, struct callb_data *);
diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
index 485be8b1e1b3..f0225bc0f267 100644
--- a/drivers/isdn/sc/interrupt.c
+++ b/drivers/isdn/sc/interrupt.c
@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
112 } 112 }
113 else if(callid>=0x0000 && callid<=0x7FFF) 113 else if(callid>=0x0000 && callid<=0x7FFF)
114 { 114 {
115 int len;
116
115 pr_debug("%s: Got Incoming Call\n", 117 pr_debug("%s: Got Incoming Call\n",
116 sc_adapter[card]->devicename); 118 sc_adapter[card]->devicename);
117 strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4])); 119 len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
118 strcpy(setup.eazmsn, 120 sizeof(setup.phone));
119 sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn); 121 if (len >= sizeof(setup.phone))
122 continue;
123 len = strlcpy(setup.eazmsn,
124 sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
125 sizeof(setup.eazmsn));
126 if (len >= sizeof(setup.eazmsn))
127 continue;
120 setup.si1 = 7; 128 setup.si1 = 7;
121 setup.si2 = 0; 129 setup.si2 = 0;
122 setup.plan = 0; 130 setup.plan = 0;
@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
176 * Handle a GetMyNumber Rsp 184 * Handle a GetMyNumber Rsp
177 */ 185 */
178 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ 186 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
179 strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array); 187 strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
188 rcvmsg.msg_data.byte_array,
189 sizeof(rcvmsg.msg_data.byte_array));
180 continue; 190 continue;
181 } 191 }
182 192
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4ba0262..350eb34f049c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
81 int cmd_level; 81 int cmd_level;
82 int slow_level; 82 int slow_level;
83 83
84 read_lock(&led_dat->rw_lock); 84 read_lock_irq(&led_dat->rw_lock);
85 85
86 cmd_level = gpio_get_value(led_dat->cmd); 86 cmd_level = gpio_get_value(led_dat->cmd);
87 slow_level = gpio_get_value(led_dat->slow); 87 slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
95 } 95 }
96 } 96 }
97 97
98 read_unlock(&led_dat->rw_lock); 98 read_unlock_irq(&led_dat->rw_lock);
99 99
100 return ret; 100 return ret;
101} 101}
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
104 enum ns2_led_modes mode) 104 enum ns2_led_modes mode)
105{ 105{
106 int i; 106 int i;
107 unsigned long flags;
107 108
108 write_lock(&led_dat->rw_lock); 109 write_lock_irqsave(&led_dat->rw_lock, flags);
109 110
110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
111 if (mode == ns2_led_modval[i].mode) { 112 if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
116 } 117 }
117 } 118 }
118 119
119 write_unlock(&led_dat->rw_lock); 120 write_unlock_irqrestore(&led_dat->rw_lock, flags);
120} 121}
121 122
122static void ns2_led_set(struct led_classdev *led_cdev, 123static void ns2_led_set(struct led_classdev *led_cdev,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 43cf9cc9c1df..f20d13e717d5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1643,7 +1643,9 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1643 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1643 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1644 if (rdev->sb_size & bmask) 1644 if (rdev->sb_size & bmask)
1645 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1645 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1646 } 1646 } else
1647 max_dev = le32_to_cpu(sb->max_dev);
1648
1647 for (i=0; i<max_dev;i++) 1649 for (i=0; i<max_dev;i++)
1648 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1650 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1649 1651
@@ -7069,7 +7071,7 @@ void md_check_recovery(mddev_t *mddev)
7069 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7071 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7070 return; 7072 return;
7071 if ( ! ( 7073 if ( ! (
7072 (mddev->flags && !mddev->external) || 7074 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
7073 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7075 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7074 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7076 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7075 (mddev->external == 0 && mddev->safemode == 1) || 7077 (mddev->external == 0 && mddev->safemode == 1) ||
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 04028a9ee082..428377a5a6f5 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq)
429 irq_tsc = cache_tsc; 429 irq_tsc = cache_tsc;
430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { 430 for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) {
431 irq_data = &max8925_irqs[i]; 431 irq_data = &max8925_irqs[i];
432 /* 1 -- disable, 0 -- enable */
432 switch (irq_data->mask_reg) { 433 switch (irq_data->mask_reg) {
433 case MAX8925_CHG_IRQ1_MASK: 434 case MAX8925_CHG_IRQ1_MASK:
434 irq_chg[0] &= irq_data->enable; 435 irq_chg[0] &= ~irq_data->enable;
435 break; 436 break;
436 case MAX8925_CHG_IRQ2_MASK: 437 case MAX8925_CHG_IRQ2_MASK:
437 irq_chg[1] &= irq_data->enable; 438 irq_chg[1] &= ~irq_data->enable;
438 break; 439 break;
439 case MAX8925_ON_OFF_IRQ1_MASK: 440 case MAX8925_ON_OFF_IRQ1_MASK:
440 irq_on[0] &= irq_data->enable; 441 irq_on[0] &= ~irq_data->enable;
441 break; 442 break;
442 case MAX8925_ON_OFF_IRQ2_MASK: 443 case MAX8925_ON_OFF_IRQ2_MASK:
443 irq_on[1] &= irq_data->enable; 444 irq_on[1] &= ~irq_data->enable;
444 break; 445 break;
445 case MAX8925_RTC_IRQ_MASK: 446 case MAX8925_RTC_IRQ_MASK:
446 irq_rtc &= irq_data->enable; 447 irq_rtc &= ~irq_data->enable;
447 break; 448 break;
448 case MAX8925_TSC_IRQ_MASK: 449 case MAX8925_TSC_IRQ_MASK:
449 irq_tsc &= irq_data->enable; 450 irq_tsc &= ~irq_data->enable;
450 break; 451 break;
451 default: 452 default:
452 dev_err(chip->dev, "wrong IRQ\n"); 453 dev_err(chip->dev, "wrong IRQ\n");
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 7dabe4dbd373..294183b6260b 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type)
394 394
395 irq = irq - wm831x->irq_base; 395 irq = irq - wm831x->irq_base;
396 396
397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) 397 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
398 return -EINVAL; 398 /* Ignore internal-only IRQs */
399 if (irq >= 0 && irq < WM831X_NUM_IRQS)
400 return 0;
401 else
402 return -EINVAL;
403 }
399 404
400 switch (type) { 405 switch (type) {
401 case IRQ_TYPE_EDGE_BOTH: 406 case IRQ_TYPE_EDGE_BOTH:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0b591b658243..b74331260744 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -368,7 +368,7 @@ config VMWARE_BALLOON
368 If unsure, say N. 368 If unsure, say N.
369 369
370 To compile this driver as a module, choose M here: the 370 To compile this driver as a module, choose M here: the
371 module will be called vmware_balloon. 371 module will be called vmw_balloon.
372 372
373config ARM_CHARLCD 373config ARM_CHARLCD
374 bool "ARM Ltd. Character LCD Driver" 374 bool "ARM Ltd. Character LCD Driver"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 255a80dc9d73..42eab95cde2a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
33obj-$(CONFIG_HMC6352) += hmc6352.o 33obj-$(CONFIG_HMC6352) += hmc6352.o
34obj-y += eeprom/ 34obj-y += eeprom/
35obj-y += cb710/ 35obj-y += cb710/
36obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o 36obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o 37obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804a71aa..2a1e804a71aa 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmw_balloon.c
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd2755e8d9a3..f332c52968b7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -362,9 +362,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
362 goto err; 362 goto err;
363 } 363 }
364 364
365 err = mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid); 365 if (ocr & R4_MEMORY_PRESENT
366 366 && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) {
367 if (!err) {
368 card->type = MMC_TYPE_SD_COMBO; 367 card->type = MMC_TYPE_SD_COMBO;
369 368
370 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || 369 if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO ||
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 5f3a599ead07..87226cd202a5 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -66,6 +66,7 @@
66#include <linux/clk.h> 66#include <linux/clk.h>
67#include <linux/atmel_pdc.h> 67#include <linux/atmel_pdc.h>
68#include <linux/gfp.h> 68#include <linux/gfp.h>
69#include <linux/highmem.h>
69 70
70#include <linux/mmc/host.h> 71#include <linux/mmc/host.h>
71 72
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 9a68ff4353a2..5a950b16d9e6 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -148,11 +148,12 @@ static int imxmci_start_clock(struct imxmci_host *host)
148 148
149 while (delay--) { 149 while (delay--) {
150 reg = readw(host->base + MMC_REG_STATUS); 150 reg = readw(host->base + MMC_REG_STATUS);
151 if (reg & STATUS_CARD_BUS_CLK_RUN) 151 if (reg & STATUS_CARD_BUS_CLK_RUN) {
152 /* Check twice before cut */ 152 /* Check twice before cut */
153 reg = readw(host->base + MMC_REG_STATUS); 153 reg = readw(host->base + MMC_REG_STATUS);
154 if (reg & STATUS_CARD_BUS_CLK_RUN) 154 if (reg & STATUS_CARD_BUS_CLK_RUN)
155 return 0; 155 return 0;
156 }
156 157
157 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) 158 if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
158 return 0; 159 return 0;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4a8776f8afdd..4526d2791f29 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2305,7 +2305,6 @@ static int omap_hsmmc_suspend(struct device *dev)
2305 int ret = 0; 2305 int ret = 0;
2306 struct platform_device *pdev = to_platform_device(dev); 2306 struct platform_device *pdev = to_platform_device(dev);
2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev); 2307 struct omap_hsmmc_host *host = platform_get_drvdata(pdev);
2308 pm_message_t state = PMSG_SUSPEND; /* unused by MMC core */
2309 2308
2310 if (host && host->suspended) 2309 if (host && host->suspended)
2311 return 0; 2310 return 0;
@@ -2324,8 +2323,8 @@ static int omap_hsmmc_suspend(struct device *dev)
2324 } 2323 }
2325 } 2324 }
2326 cancel_work_sync(&host->mmc_carddetect_work); 2325 cancel_work_sync(&host->mmc_carddetect_work);
2327 mmc_host_enable(host->mmc);
2328 ret = mmc_suspend_host(host->mmc); 2326 ret = mmc_suspend_host(host->mmc);
2327 mmc_host_enable(host->mmc);
2329 if (ret == 0) { 2328 if (ret == 0) {
2330 omap_hsmmc_disable_irq(host); 2329 omap_hsmmc_disable_irq(host);
2331 OMAP_HSMMC_WRITE(host->base, HCTL, 2330 OMAP_HSMMC_WRITE(host->base, HCTL,
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 2e16e0a90a5e..976330de379e 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1600,7 +1600,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)
1600 host->pio_active = XFER_NONE; 1600 host->pio_active = XFER_NONE;
1601 1601
1602#ifdef CONFIG_MMC_S3C_PIODMA 1602#ifdef CONFIG_MMC_S3C_PIODMA
1603 host->dodma = host->pdata->dma; 1603 host->dodma = host->pdata->use_dma;
1604#endif 1604#endif
1605 1605
1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1606 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad4163b95e..aacb862ecc8a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 241static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 unsigned long flags;
245
244 if (host) { 246 if (host) {
245 spin_lock(&host->lock); 247 spin_lock_irqsave(&host->lock, flags);
246 if (state) { 248 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 249 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 250 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 255 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 256 }
255 tasklet_schedule(&host->card_tasklet); 257 tasklet_schedule(&host->card_tasklet);
256 spin_unlock(&host->lock); 258 spin_unlock_irqrestore(&host->lock, flags);
257 } 259 }
258} 260}
259 261
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
481 sdhci_remove_host(host, 1); 483 sdhci_remove_host(host, 1);
482 484
483 for (ptr = 0; ptr < 3; ptr++) { 485 for (ptr = 0; ptr < 3; ptr++) {
484 clk_disable(sc->clk_bus[ptr]); 486 if (sc->clk_bus[ptr]) {
485 clk_put(sc->clk_bus[ptr]); 487 clk_disable(sc->clk_bus[ptr]);
488 clk_put(sc->clk_bus[ptr]);
489 }
486 } 490 }
487 clk_disable(sc->clk_io); 491 clk_disable(sc->clk_io);
488 clk_put(sc->clk_io); 492 clk_put(sc->clk_io);
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index ee7d0a5a51c4..69d98e3bf6ab 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) 164static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
165{ 165{
166 struct mmc_data *data = host->data; 166 struct mmc_data *data = host->data;
167 void *sg_virt;
167 unsigned short *buf; 168 unsigned short *buf;
168 unsigned int count; 169 unsigned int count;
169 unsigned long flags; 170 unsigned long flags;
@@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
173 return; 174 return;
174 } 175 }
175 176
176 buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + 177 sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
177 host->sg_off); 178 buf = (unsigned short *)(sg_virt + host->sg_off);
178 179
179 count = host->sg_ptr->length - host->sg_off; 180 count = host->sg_ptr->length - host->sg_off;
180 if (count > data->blksz) 181 if (count > data->blksz)
@@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
191 192
192 host->sg_off += count; 193 host->sg_off += count;
193 194
194 tmio_mmc_kunmap_atomic(host, &flags); 195 tmio_mmc_kunmap_atomic(sg_virt, &flags);
195 196
196 if (host->sg_off == host->sg_ptr->length) 197 if (host->sg_off == host->sg_ptr->length)
197 tmio_mmc_next_sg(host); 198 tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 64f7d5dfc106..0fedc78e3ea5 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -82,10 +82,7 @@
82 82
83#define ack_mmc_irqs(host, i) \ 83#define ack_mmc_irqs(host, i) \
84 do { \ 84 do { \
85 u32 mask;\ 85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 mask = sd_ctrl_read32((host), CTL_STATUS); \
87 mask &= ~((i) & TMIO_MASK_IRQ); \
88 sd_ctrl_write32((host), CTL_STATUS, mask); \
89 } while (0) 86 } while (0)
90 87
91 88
@@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
177 return --host->sg_len; 174 return --host->sg_len;
178} 175}
179 176
180static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, 177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
181 unsigned long *flags) 178 unsigned long *flags)
182{ 179{
183 struct scatterlist *sg = host->sg_ptr;
184
185 local_irq_save(*flags); 180 local_irq_save(*flags);
186 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; 181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
187} 182}
188 183
189static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, 184static inline void tmio_mmc_kunmap_atomic(void *virt,
190 unsigned long *flags) 185 unsigned long *flags)
191{ 186{
192 kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); 187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
193 local_irq_restore(*flags); 188 local_irq_restore(*flags);
194} 189}
195 190
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a382e3dd0a5d..6fbeefa3a766 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -682,7 +682,6 @@ static int __devinit bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
682static int __devexit bf5xx_nand_remove(struct platform_device *pdev) 682static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
683{ 683{
684 struct bf5xx_nand_info *info = to_nand_info(pdev); 684 struct bf5xx_nand_info *info = to_nand_info(pdev);
685 struct mtd_info *mtd = NULL;
686 685
687 platform_set_drvdata(pdev, NULL); 686 platform_set_drvdata(pdev, NULL);
688 687
@@ -690,11 +689,7 @@ static int __devexit bf5xx_nand_remove(struct platform_device *pdev)
690 * and their partitions, then go through freeing the 689 * and their partitions, then go through freeing the
691 * resources used 690 * resources used
692 */ 691 */
693 mtd = &info->mtd; 692 nand_release(&info->mtd);
694 if (mtd) {
695 nand_release(mtd);
696 kfree(mtd);
697 }
698 693
699 peripheral_free_list(bfin_nfc_pin_req); 694 peripheral_free_list(bfin_nfc_pin_req);
700 bf5xx_nand_dma_remove(info); 695 bf5xx_nand_dma_remove(info);
@@ -710,7 +705,7 @@ static int bf5xx_nand_scan(struct mtd_info *mtd)
710 struct nand_chip *chip = mtd->priv; 705 struct nand_chip *chip = mtd->priv;
711 int ret; 706 int ret;
712 707
713 ret = nand_scan_ident(mtd, 1); 708 ret = nand_scan_ident(mtd, 1, NULL);
714 if (ret) 709 if (ret)
715 return ret; 710 return ret;
716 711
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index fcf8ceb277d4..b2828e84d243 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -67,7 +67,9 @@
67#define NFC_V1_V2_CONFIG1_BIG (1 << 5) 67#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
68#define NFC_V1_V2_CONFIG1_RST (1 << 6) 68#define NFC_V1_V2_CONFIG1_RST (1 << 6)
69#define NFC_V1_V2_CONFIG1_CE (1 << 7) 69#define NFC_V1_V2_CONFIG1_CE (1 << 7)
70#define NFC_V1_V2_CONFIG1_ONE_CYCLE (1 << 8) 70#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
71#define NFC_V2_CONFIG1_PPB(x) (((x) & 0x3) << 9)
72#define NFC_V2_CONFIG1_FP_INT (1 << 11)
71 73
72#define NFC_V1_V2_CONFIG2_INT (1 << 15) 74#define NFC_V1_V2_CONFIG2_INT (1 << 15)
73 75
@@ -402,16 +404,16 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
402 /* Wait for operation to complete */ 404 /* Wait for operation to complete */
403 wait_op_done(host, true); 405 wait_op_done(host, true);
404 406
407 memcpy(host->data_buf, host->main_area0, 16);
408
405 if (this->options & NAND_BUSWIDTH_16) { 409 if (this->options & NAND_BUSWIDTH_16) {
406 void __iomem *main_buf = host->main_area0;
407 /* compress the ID info */ 410 /* compress the ID info */
408 writeb(readb(main_buf + 2), main_buf + 1); 411 host->data_buf[1] = host->data_buf[2];
409 writeb(readb(main_buf + 4), main_buf + 2); 412 host->data_buf[2] = host->data_buf[4];
410 writeb(readb(main_buf + 6), main_buf + 3); 413 host->data_buf[3] = host->data_buf[6];
411 writeb(readb(main_buf + 8), main_buf + 4); 414 host->data_buf[4] = host->data_buf[8];
412 writeb(readb(main_buf + 10), main_buf + 5); 415 host->data_buf[5] = host->data_buf[10];
413 } 416 }
414 memcpy(host->data_buf, host->main_area0, 16);
415} 417}
416 418
417static uint16_t get_dev_status_v3(struct mxc_nand_host *host) 419static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -729,27 +731,30 @@ static void preset_v1_v2(struct mtd_info *mtd)
729{ 731{
730 struct nand_chip *nand_chip = mtd->priv; 732 struct nand_chip *nand_chip = mtd->priv;
731 struct mxc_nand_host *host = nand_chip->priv; 733 struct mxc_nand_host *host = nand_chip->priv;
732 uint16_t tmp; 734 uint16_t config1 = 0;
733 735
734 /* enable interrupt, disable spare enable */ 736 if (nand_chip->ecc.mode == NAND_ECC_HW)
735 tmp = readw(NFC_V1_V2_CONFIG1); 737 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
736 tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; 738
737 tmp &= ~NFC_V1_V2_CONFIG1_SP_EN; 739 if (nfc_is_v21())
738 if (nand_chip->ecc.mode == NAND_ECC_HW) { 740 config1 |= NFC_V2_CONFIG1_FP_INT;
739 tmp |= NFC_V1_V2_CONFIG1_ECC_EN; 741
740 } else { 742 if (!cpu_is_mx21())
741 tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; 743 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
742 }
743 744
744 if (nfc_is_v21() && mtd->writesize) { 745 if (nfc_is_v21() && mtd->writesize) {
746 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
747
745 host->eccsize = get_eccsize(mtd); 748 host->eccsize = get_eccsize(mtd);
746 if (host->eccsize == 4) 749 if (host->eccsize == 4)
747 tmp |= NFC_V2_CONFIG1_ECC_MODE_4; 750 config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
751
752 config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
748 } else { 753 } else {
749 host->eccsize = 1; 754 host->eccsize = 1;
750 } 755 }
751 756
752 writew(tmp, NFC_V1_V2_CONFIG1); 757 writew(config1, NFC_V1_V2_CONFIG1);
753 /* preset operation */ 758 /* preset operation */
754 759
755 /* Unlock the internal RAM Buffer */ 760 /* Unlock the internal RAM Buffer */
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 133d51528f8d..513e0a76a4a7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); 413 prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT);
414 } while (prefetch_status); 414 } while (prefetch_status);
415 /* disable and stop the PFPW engine */ 415 /* disable and stop the PFPW engine */
416 gpmc_prefetch_reset(); 416 gpmc_prefetch_reset(info->gpmc_cs);
417 417
418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 418 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
419 return 0; 419 return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4d89f3780207..4d01cda68844 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -1320,6 +1320,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1320 goto fail_free_irq; 1320 goto fail_free_irq;
1321 } 1321 }
1322 1322
1323#ifdef CONFIG_MTD_PARTITIONS
1323 if (mtd_has_cmdlinepart()) { 1324 if (mtd_has_cmdlinepart()) {
1324 static const char *probes[] = { "cmdlinepart", NULL }; 1325 static const char *probes[] = { "cmdlinepart", NULL };
1325 struct mtd_partition *parts; 1326 struct mtd_partition *parts;
@@ -1332,6 +1333,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1332 } 1333 }
1333 1334
1334 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); 1335 return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts);
1336#else
1337 return 0;
1338#endif
1335 1339
1336fail_free_irq: 1340fail_free_irq:
1337 free_irq(irq, info); 1341 free_irq(irq, info);
@@ -1364,7 +1368,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1364 platform_set_drvdata(pdev, NULL); 1368 platform_set_drvdata(pdev, NULL);
1365 1369
1366 del_mtd_device(mtd); 1370 del_mtd_device(mtd);
1371#ifdef CONFIG_MTD_PARTITIONS
1367 del_mtd_partitions(mtd); 1372 del_mtd_partitions(mtd);
1373#endif
1368 irq = platform_get_irq(pdev, 0); 1374 irq = platform_get_irq(pdev, 0);
1369 if (irq >= 0) 1375 if (irq >= 0)
1370 free_irq(irq, info); 1376 free_irq(irq, info);
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index cb443af3d45f..a460f1b748c2 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -554,14 +554,13 @@ static int s5pc110_dma_ops(void *dst, void *src, size_t count, int direction)
554 554
555 do { 555 do {
556 status = readl(base + S5PC110_DMA_TRANS_STATUS); 556 status = readl(base + S5PC110_DMA_TRANS_STATUS);
557 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
558 writel(S5PC110_DMA_TRANS_CMD_TEC,
559 base + S5PC110_DMA_TRANS_CMD);
560 return -EIO;
561 }
557 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD)); 562 } while (!(status & S5PC110_DMA_TRANS_STATUS_TD));
558 563
559 if (status & S5PC110_DMA_TRANS_STATUS_TE) {
560 writel(S5PC110_DMA_TRANS_CMD_TEC, base + S5PC110_DMA_TRANS_CMD);
561 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
562 return -EIO;
563 }
564
565 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD); 564 writel(S5PC110_DMA_TRANS_CMD_TDC, base + S5PC110_DMA_TRANS_CMD);
566 565
567 return 0; 566 return 0;
@@ -571,13 +570,12 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
571 unsigned char *buffer, int offset, size_t count) 570 unsigned char *buffer, int offset, size_t count)
572{ 571{
573 struct onenand_chip *this = mtd->priv; 572 struct onenand_chip *this = mtd->priv;
574 void __iomem *bufferram;
575 void __iomem *p; 573 void __iomem *p;
576 void *buf = (void *) buffer; 574 void *buf = (void *) buffer;
577 dma_addr_t dma_src, dma_dst; 575 dma_addr_t dma_src, dma_dst;
578 int err; 576 int err;
579 577
580 p = bufferram = this->base + area; 578 p = this->base + area;
581 if (ONENAND_CURRENT_BUFFERRAM(this)) { 579 if (ONENAND_CURRENT_BUFFERRAM(this)) {
582 if (area == ONENAND_DATARAM) 580 if (area == ONENAND_DATARAM)
583 p += this->writesize; 581 p += this->writesize;
@@ -621,7 +619,7 @@ static int s5pc110_read_bufferram(struct mtd_info *mtd, int area,
621normal: 619normal:
622 if (count != mtd->writesize) { 620 if (count != mtd->writesize) {
623 /* Copy the bufferram to memory to prevent unaligned access */ 621 /* Copy the bufferram to memory to prevent unaligned access */
624 memcpy(this->page_buf, bufferram, mtd->writesize); 622 memcpy(this->page_buf, p, mtd->writesize);
625 p = this->page_buf + offset; 623 p = this->page_buf + offset;
626 } 624 }
627 625
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 8a6eb0c44486..cdf7226a7c43 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -662,7 +662,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
662 pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); 662 pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n");
663 663
664 { 664 {
665 char *ram_split[] = { "5:3", "3:1", "1:1", "3:5" }; 665 static const char * const ram_split[] = {
666 "5:3", "3:1", "1:1", "3:5"
667 };
666 __u32 config; 668 __u32 config;
667 EL3WINDOW(3); 669 EL3WINDOW(3);
668 vp->available_media = inw(ioaddr + Wn3_Options); 670 vp->available_media = inw(ioaddr + Wn3_Options);
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index a7b0e5e43a52..de579d043169 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -287,7 +287,7 @@ static int elmc_open(struct net_device *dev)
287 287
288 elmc_id_attn586(); /* disable interrupts */ 288 elmc_id_attn586(); /* disable interrupts */
289 289
290 ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, 290 ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED,
291 dev->name, dev); 291 dev->name, dev);
292 if (ret) { 292 if (ret) {
293 pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq); 293 pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
@@ -463,7 +463,7 @@ static int __init do_elmc_probe(struct net_device *dev)
463 463
464 /* we didn't find any 3c523 in the slots we checked for */ 464 /* we didn't find any 3c523 in the slots we checked for */
465 if (slot == MCA_NOTFOUND) 465 if (slot == MCA_NOTFOUND)
466 return ((base_addr || irq) ? -ENXIO : -ENODEV); 466 return (base_addr || irq) ? -ENXIO : -ENODEV;
467 467
468 mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC"); 468 mca_set_adapter_name(slot, "3Com 3c523 Etherlink/MC");
469 mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev); 469 mca_set_adapter_procfn(slot, (MCA_ProcFn) elmc_getinfo, dev);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 70705d1306b9..0d6ca1e407d0 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -443,7 +443,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
443 * Grab the IRQ 443 * Grab the IRQ
444 */ 444 */
445 445
446 err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev); 446 err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
447 if (err) { 447 if (err) {
448 release_region(dev->base_addr, MC32_IO_EXTENT); 448 release_region(dev->base_addr, MC32_IO_EXTENT);
449 pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); 449 pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e31a6d1919c6..e1da258bbfb7 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -635,6 +635,9 @@ struct vortex_private {
635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ 635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
636 large_frames:1, /* accept large frames */ 636 large_frames:1, /* accept large frames */
637 handling_irq:1; /* private in_irq indicator */ 637 handling_irq:1; /* private in_irq indicator */
638 /* {get|set}_wol operations are already serialized by rtnl.
639 * no additional locking is required for the enable_wol and acpi_set_WOL()
640 */
638 int drv_flags; 641 int drv_flags;
639 u16 status_enable; 642 u16 status_enable;
640 u16 intr_enable; 643 u16 intr_enable;
@@ -2939,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2939{ 2942{
2940 struct vortex_private *vp = netdev_priv(dev); 2943 struct vortex_private *vp = netdev_priv(dev);
2941 2944
2942 spin_lock_irq(&vp->lock); 2945 if (!VORTEX_PCI(vp))
2946 return;
2947
2943 wol->supported = WAKE_MAGIC; 2948 wol->supported = WAKE_MAGIC;
2944 2949
2945 wol->wolopts = 0; 2950 wol->wolopts = 0;
2946 if (vp->enable_wol) 2951 if (vp->enable_wol)
2947 wol->wolopts |= WAKE_MAGIC; 2952 wol->wolopts |= WAKE_MAGIC;
2948 spin_unlock_irq(&vp->lock);
2949} 2953}
2950 2954
2951static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2955static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2952{ 2956{
2953 struct vortex_private *vp = netdev_priv(dev); 2957 struct vortex_private *vp = netdev_priv(dev);
2958
2959 if (!VORTEX_PCI(vp))
2960 return -EOPNOTSUPP;
2961
2954 if (wol->wolopts & ~WAKE_MAGIC) 2962 if (wol->wolopts & ~WAKE_MAGIC)
2955 return -EINVAL; 2963 return -EINVAL;
2956 2964
2957 spin_lock_irq(&vp->lock);
2958 if (wol->wolopts & WAKE_MAGIC) 2965 if (wol->wolopts & WAKE_MAGIC)
2959 vp->enable_wol = 1; 2966 vp->enable_wol = 1;
2960 else 2967 else
2961 vp->enable_wol = 0; 2968 vp->enable_wol = 0;
2962 acpi_set_WOL(dev); 2969 acpi_set_WOL(dev);
2963 spin_unlock_irq(&vp->lock);
2964 2970
2965 return 0; 2971 return 0;
2966} 2972}
@@ -3202,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
3202 return; 3208 return;
3203 } 3209 }
3204 3210
3211 if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
3212 return;
3213
3205 /* Change the power state to D3; RxEnable doesn't take effect. */ 3214 /* Change the power state to D3; RxEnable doesn't take effect. */
3206 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); 3215 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
3207 } 3216 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 237d4ea5a416..ac422cd332ea 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -754,7 +754,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
754 } 754 }
755 755
756#if CP_VLAN_TAG_USED 756#if CP_VLAN_TAG_USED
757 if (cp->vlgrp && vlan_tx_tag_present(skb)) 757 if (vlan_tx_tag_present(skb))
758 vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb)); 758 vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
759#endif 759#endif
760 760
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 53c4810b119e..d24f54b8c19a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -177,6 +177,13 @@ config NET_SB1000
177 177
178source "drivers/net/arcnet/Kconfig" 178source "drivers/net/arcnet/Kconfig"
179 179
180config MII
181 tristate "Generic Media Independent Interface device support"
182 help
183 Most ethernet controllers have MII transceiver either as an external
184 or internal device. It is safe to say Y or M here even if your
185 ethernet card lacks MII.
186
180source "drivers/net/phy/Kconfig" 187source "drivers/net/phy/Kconfig"
181 188
182# 189#
@@ -212,13 +219,6 @@ menuconfig NET_ETHERNET
212 219
213if NET_ETHERNET 220if NET_ETHERNET
214 221
215config MII
216 tristate "Generic Media Independent Interface device support"
217 help
218 Most ethernet controllers have MII transceiver either as an external
219 or internal device. It is safe to say Y or M here even if your
220 ethernet card lack MII.
221
222config MACB 222config MACB
223 tristate "Atmel MACB support" 223 tristate "Atmel MACB support"
224 depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9 224 depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9
@@ -2428,7 +2428,7 @@ config UGETH_TX_ON_DEMAND
2428 2428
2429config MV643XX_ETH 2429config MV643XX_ETH
2430 tristate "Marvell Discovery (643XX) and Orion ethernet support" 2430 tristate "Marvell Discovery (643XX) and Orion ethernet support"
2431 depends on MV64X60 || PPC32 || PLAT_ORION 2431 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
2432 select INET_LRO 2432 select INET_LRO
2433 select PHYLIB 2433 select PHYLIB
2434 help 2434 help
@@ -2515,6 +2515,18 @@ config S6GMAC
2515 2515
2516source "drivers/net/stmmac/Kconfig" 2516source "drivers/net/stmmac/Kconfig"
2517 2517
2518config PCH_GBE
2519 tristate "PCH Gigabit Ethernet"
2520 depends on PCI
2521 ---help---
2522 This is a gigabit ethernet driver for Topcliff PCH.
2523 Topcliff PCH is the platform controller hub that is used in Intel's
2524 general embedded platform.
2525 Topcliff PCH has Gigabit Ethernet interface.
2526 Using this interface, it is able to access system devices connected
2527 to Gigabit Ethernet.
2528 This driver enables Gigabit Ethernet function.
2529
2518endif # NETDEV_1000 2530endif # NETDEV_1000
2519 2531
2520# 2532#
@@ -2803,7 +2815,7 @@ config NIU
2803 2815
2804config PASEMI_MAC 2816config PASEMI_MAC
2805 tristate "PA Semi 1/10Gbit MAC" 2817 tristate "PA Semi 1/10Gbit MAC"
2806 depends on PPC_PASEMI && PCI 2818 depends on PPC_PASEMI && PCI && INET
2807 select PHYLIB 2819 select PHYLIB
2808 select INET_LRO 2820 select INET_LRO
2809 help 2821 help
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 18a277709a2a..b8bf93d4a132 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -298,3 +298,4 @@ obj-$(CONFIG_WIMAX) += wimax/
298obj-$(CONFIG_CAIF) += caif/ 298obj-$(CONFIG_CAIF) += caif/
299 299
300obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ 300obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
301obj-$(CONFIG_PCH_GBE) += pch_gbe/
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 58a0ab4923ee..2ca880b4c0db 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1315,7 +1315,7 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1315 lp->tx_ring[tx_index].tx_flags = 0; 1315 lp->tx_ring[tx_index].tx_flags = 0;
1316 1316
1317#if AMD8111E_VLAN_TAG_USED 1317#if AMD8111E_VLAN_TAG_USED
1318 if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){ 1318 if (vlan_tx_tag_present(skb)) {
1319 lp->tx_ring[tx_index].tag_ctrl_cmd |= 1319 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1320 cpu_to_le16(TCC_VLAN_INSERT); 1320 cpu_to_le16(TCC_VLAN_INSERT);
1321 lp->tx_ring[tx_index].tag_ctrl_info = 1321 lp->tx_ring[tx_index].tag_ctrl_info =
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 0362c8d31a08..10d0dba572c2 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -244,7 +244,7 @@ static int ipddp_delete(struct ipddp_route *rt)
244 } 244 }
245 245
246 spin_unlock_bh(&ipddp_route_lock); 246 spin_unlock_bh(&ipddp_route_lock);
247 return (-ENOENT); 247 return -ENOENT;
248} 248}
249 249
250/* 250/*
@@ -259,10 +259,10 @@ static struct ipddp_route* __ipddp_find_route(struct ipddp_route *rt)
259 if(f->ip == rt->ip && 259 if(f->ip == rt->ip &&
260 f->at.s_net == rt->at.s_net && 260 f->at.s_net == rt->at.s_net &&
261 f->at.s_node == rt->at.s_node) 261 f->at.s_node == rt->at.s_node)
262 return (f); 262 return f;
263 } 263 }
264 264
265 return (NULL); 265 return NULL;
266} 266}
267 267
268static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 268static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -279,7 +279,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
279 switch(cmd) 279 switch(cmd)
280 { 280 {
281 case SIOCADDIPDDPRT: 281 case SIOCADDIPDDPRT:
282 return (ipddp_create(&rcp)); 282 return ipddp_create(&rcp);
283 283
284 case SIOCFINDIPDDPRT: 284 case SIOCFINDIPDDPRT:
285 spin_lock_bh(&ipddp_route_lock); 285 spin_lock_bh(&ipddp_route_lock);
@@ -297,7 +297,7 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
297 return -ENOENT; 297 return -ENOENT;
298 298
299 case SIOCDELIPDDPRT: 299 case SIOCDELIPDDPRT:
300 return (ipddp_delete(&rcp)); 300 return ipddp_delete(&rcp);
301 301
302 default: 302 default:
303 return -EINVAL; 303 return -EINVAL;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index adc07551739e..e69eead12ec7 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -727,7 +727,7 @@ static int sendup_buffer (struct net_device *dev)
727 727
728 if (ltc->command != LT_RCVLAP) { 728 if (ltc->command != LT_RCVLAP) {
729 printk("unknown command 0x%02x from ltpc card\n",ltc->command); 729 printk("unknown command 0x%02x from ltpc card\n",ltc->command);
730 return(-1); 730 return -1;
731 } 731 }
732 dnode = ltc->dnode; 732 dnode = ltc->dnode;
733 snode = ltc->snode; 733 snode = ltc->snode;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index b57d7dee389a..3134e5326231 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -362,7 +362,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len )
362 *cto++ = *cfrom++; 362 *cto++ = *cfrom++;
363 MFPDELAY(); 363 MFPDELAY();
364 } 364 }
365 return( dst ); 365 return dst;
366} 366}
367 367
368 368
@@ -449,7 +449,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
449 vbr[2] = save_berr; 449 vbr[2] = save_berr;
450 local_irq_restore(flags); 450 local_irq_restore(flags);
451 451
452 return( ret ); 452 return ret;
453} 453}
454 454
455static const struct net_device_ops lance_netdev_ops = { 455static const struct net_device_ops lance_netdev_ops = {
@@ -526,7 +526,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
526 goto probe_ok; 526 goto probe_ok;
527 527
528 probe_fail: 528 probe_fail:
529 return( 0 ); 529 return 0;
530 530
531 probe_ok: 531 probe_ok:
532 lp = netdev_priv(dev); 532 lp = netdev_priv(dev);
@@ -556,7 +556,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, 556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
557 "PAM/Riebl-ST Ethernet", dev)) { 557 "PAM/Riebl-ST Ethernet", dev)) {
558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); 558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
559 return( 0 ); 559 return 0;
560 } 560 }
561 dev->irq = (unsigned short)IRQ_AUTO_5; 561 dev->irq = (unsigned short)IRQ_AUTO_5;
562 } 562 }
@@ -568,12 +568,12 @@ static unsigned long __init lance_probe1( struct net_device *dev,
568 unsigned long irq = atari_register_vme_int(); 568 unsigned long irq = atari_register_vme_int();
569 if (!irq) { 569 if (!irq) {
570 printk( "Lance: request for VME interrupt failed\n" ); 570 printk( "Lance: request for VME interrupt failed\n" );
571 return( 0 ); 571 return 0;
572 } 572 }
573 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO, 573 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
574 "Riebl-VME Ethernet", dev)) { 574 "Riebl-VME Ethernet", dev)) {
575 printk( "Lance: request for irq %ld failed\n", irq ); 575 printk( "Lance: request for irq %ld failed\n", irq );
576 return( 0 ); 576 return 0;
577 } 577 }
578 dev->irq = irq; 578 dev->irq = irq;
579 } 579 }
@@ -637,7 +637,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
637 /* XXX MSch */ 637 /* XXX MSch */
638 dev->watchdog_timeo = TX_TIMEOUT; 638 dev->watchdog_timeo = TX_TIMEOUT;
639 639
640 return( 1 ); 640 return 1;
641} 641}
642 642
643 643
@@ -666,7 +666,7 @@ static int lance_open( struct net_device *dev )
666 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", 666 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
667 dev->name, i, DREG )); 667 dev->name, i, DREG ));
668 DREG = CSR0_STOP; 668 DREG = CSR0_STOP;
669 return( -EIO ); 669 return -EIO;
670 } 670 }
671 DREG = CSR0_IDON; 671 DREG = CSR0_IDON;
672 DREG = CSR0_STRT; 672 DREG = CSR0_STRT;
@@ -676,7 +676,7 @@ static int lance_open( struct net_device *dev )
676 676
677 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); 677 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
678 678
679 return( 0 ); 679 return 0;
680} 680}
681 681
682 682
@@ -1126,13 +1126,13 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
1126 int i; 1126 int i;
1127 1127
1128 if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL) 1128 if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
1129 return( -EOPNOTSUPP ); 1129 return -EOPNOTSUPP;
1130 1130
1131 if (netif_running(dev)) { 1131 if (netif_running(dev)) {
1132 /* Only possible while card isn't started */ 1132 /* Only possible while card isn't started */
1133 DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n", 1133 DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
1134 dev->name )); 1134 dev->name ));
1135 return( -EIO ); 1135 return -EIO;
1136 } 1136 }
1137 1137
1138 memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len ); 1138 memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
@@ -1142,7 +1142,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
1142 /* set also the magic for future sessions */ 1142 /* set also the magic for future sessions */
1143 *RIEBL_MAGIC_ADDR = RIEBL_MAGIC; 1143 *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
1144 1144
1145 return( 0 ); 1145 return 0;
1146} 1146}
1147 1147
1148 1148
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 553230eb365c..99ffcf667d1f 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2243,7 +2243,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2243 return NETDEV_TX_OK; 2243 return NETDEV_TX_OK;
2244 } 2244 }
2245 2245
2246 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 2246 if (unlikely(vlan_tx_tag_present(skb))) {
2247 u16 vlan = vlan_tx_tag_get(skb); 2247 u16 vlan = vlan_tx_tag_get(skb);
2248 __le16 tag; 2248 __le16 tag;
2249 2249
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 56ace3fbe40d..ef6349bf3b33 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1814,7 +1814,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
1814 1814
1815 tpd = atl1e_get_tpd(adapter); 1815 tpd = atl1e_get_tpd(adapter);
1816 1816
1817 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 1817 if (unlikely(vlan_tx_tag_present(skb))) {
1818 u16 vlan_tag = vlan_tx_tag_get(skb); 1818 u16 vlan_tag = vlan_tx_tag_get(skb);
1819 u16 atl1e_vlan_tag; 1819 u16 atl1e_vlan_tag;
1820 1820
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index e1e0171d6e62..dbd27b8e66bd 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
1251 1251
1252 rrd_ring->desc = NULL; 1252 rrd_ring->desc = NULL;
1253 rrd_ring->dma = 0; 1253 rrd_ring->dma = 0;
1254
1255 adapter->cmb.dma = 0;
1256 adapter->cmb.cmb = NULL;
1257
1258 adapter->smb.dma = 0;
1259 adapter->smb.smb = NULL;
1254} 1260}
1255 1261
1256static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) 1262static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2094,9 +2100,9 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
2094{ 2100{
2095 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 2101 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2096 u16 next_to_use = atomic_read(&tpd_ring->next_to_use); 2102 u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
2097 return ((next_to_clean > next_to_use) ? 2103 return (next_to_clean > next_to_use) ?
2098 next_to_clean - next_to_use - 1 : 2104 next_to_clean - next_to_use - 1 :
2099 tpd_ring->count + next_to_clean - next_to_use - 1); 2105 tpd_ring->count + next_to_clean - next_to_use - 1;
2100} 2106}
2101 2107
2102static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, 2108static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
@@ -2402,7 +2408,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2402 (u16) atomic_read(&tpd_ring->next_to_use)); 2408 (u16) atomic_read(&tpd_ring->next_to_use));
2403 memset(ptpd, 0, sizeof(struct tx_packet_desc)); 2409 memset(ptpd, 0, sizeof(struct tx_packet_desc));
2404 2410
2405 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 2411 if (vlan_tx_tag_present(skb)) {
2406 vlan_tag = vlan_tx_tag_get(skb); 2412 vlan_tag = vlan_tx_tag_get(skb);
2407 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 2413 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
2408 ((vlan_tag >> 9) & 0x8); 2414 ((vlan_tag >> 9) & 0x8);
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
2847 pci_enable_wake(pdev, PCI_D3cold, 0); 2853 pci_enable_wake(pdev, PCI_D3cold, 0);
2848 2854
2849 atl1_reset_hw(&adapter->hw); 2855 atl1_reset_hw(&adapter->hw);
2850 adapter->cmb.cmb->int_stats = 0;
2851 2856
2852 if (netif_running(netdev)) 2857 if (netif_running(netdev)) {
2858 adapter->cmb.cmb->int_stats = 0;
2853 atl1_up(adapter); 2859 atl1_up(adapter);
2860 }
2854 netif_device_attach(netdev); 2861 netif_device_attach(netdev);
2855 2862
2856 return 0; 2863 return 0;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 29c0265ccc5d..35b14bec1207 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -870,7 +870,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
870 offset = ((u32)(skb->len-copy_len + 3) & ~3); 870 offset = ((u32)(skb->len-copy_len + 3) & ~3);
871 } 871 }
872#ifdef NETIF_F_HW_VLAN_TX 872#ifdef NETIF_F_HW_VLAN_TX
873 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 873 if (vlan_tx_tag_present(skb)) {
874 u16 vlan_tag = vlan_tx_tag_get(skb); 874 u16 vlan_tag = vlan_tx_tag_get(skb);
875 vlan_tag = (vlan_tag << 4) | 875 vlan_tag = (vlan_tag << 4) |
876 (vlan_tag >> 13) | 876 (vlan_tag >> 13) |
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 20e946b1e744..b6da4cf3694b 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -864,6 +864,7 @@ static int ax_probe(struct platform_device *pdev)
864 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 864 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
865 if (res == NULL) { 865 if (res == NULL) {
866 dev_err(&pdev->dev, "no IRQ specified\n"); 866 dev_err(&pdev->dev, "no IRQ specified\n");
867 ret = -ENXIO;
867 goto exit_mem; 868 goto exit_mem;
868 } 869 }
869 870
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 8e7c8a8e61c7..171da7f75108 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2296,18 +2296,27 @@ static int b44_resume(struct ssb_device *sdev)
2296 if (!netif_running(dev)) 2296 if (!netif_running(dev))
2297 return 0; 2297 return 0;
2298 2298
2299 spin_lock_irq(&bp->lock);
2300 b44_init_rings(bp);
2301 b44_init_hw(bp, B44_FULL_RESET);
2302 spin_unlock_irq(&bp->lock);
2303
2304 /*
2305 * As a shared interrupt, the handler can be called immediately. To be
2306 * able to check the interrupt status the hardware must already be
2307 * powered back on (b44_init_hw).
2308 */
2299 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev); 2309 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2300 if (rc) { 2310 if (rc) {
2301 netdev_err(dev, "request_irq failed\n"); 2311 netdev_err(dev, "request_irq failed\n");
2312 spin_lock_irq(&bp->lock);
2313 b44_halt(bp);
2314 b44_free_rings(bp);
2315 spin_unlock_irq(&bp->lock);
2302 return rc; 2316 return rc;
2303 } 2317 }
2304 2318
2305 spin_lock_irq(&bp->lock);
2306
2307 b44_init_rings(bp);
2308 b44_init_hw(bp, B44_FULL_RESET);
2309 netif_device_attach(bp->dev); 2319 netif_device_attach(bp->dev);
2310 spin_unlock_irq(&bp->lock);
2311 2320
2312 b44_enable_ints(bp); 2321 b44_enable_ints(bp);
2313 netif_wake_queue(dev); 2322 netif_wake_queue(dev);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4faf6961dcec..4594a28b1f66 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -78,6 +78,8 @@ static inline char *nic_name(struct pci_dev *pdev)
78#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */ 78#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
79#define MCC_CQ_LEN 256 79#define MCC_CQ_LEN 256
80 80
81#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
82#define BE_MAX_MSIX_VECTORS (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */
81#define BE_NAPI_WEIGHT 64 83#define BE_NAPI_WEIGHT 64
82#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ 84#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
83#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) 85#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
@@ -157,10 +159,9 @@ struct be_mcc_obj {
157 bool rearm_cq; 159 bool rearm_cq;
158}; 160};
159 161
160struct be_drvr_stats { 162struct be_tx_stats {
161 u32 be_tx_reqs; /* number of TX requests initiated */ 163 u32 be_tx_reqs; /* number of TX requests initiated */
162 u32 be_tx_stops; /* number of times TX Q was stopped */ 164 u32 be_tx_stops; /* number of times TX Q was stopped */
163 u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
164 u32 be_tx_wrbs; /* number of tx WRBs used */ 165 u32 be_tx_wrbs; /* number of tx WRBs used */
165 u32 be_tx_events; /* number of tx completion events */ 166 u32 be_tx_events; /* number of tx completion events */
166 u32 be_tx_compl; /* number of tx completion entries processed */ 167 u32 be_tx_compl; /* number of tx completion entries processed */
@@ -169,35 +170,6 @@ struct be_drvr_stats {
169 u64 be_tx_bytes_prev; 170 u64 be_tx_bytes_prev;
170 u64 be_tx_pkts; 171 u64 be_tx_pkts;
171 u32 be_tx_rate; 172 u32 be_tx_rate;
172
173 u32 cache_barrier[16];
174
175 u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
176 u32 be_rx_polls; /* number of times NAPI called poll function */
177 u32 be_rx_events; /* number of ucast rx completion events */
178 u32 be_rx_compl; /* number of rx completion entries processed */
179 ulong be_rx_jiffies;
180 u64 be_rx_bytes;
181 u64 be_rx_bytes_prev;
182 u64 be_rx_pkts;
183 u32 be_rx_rate;
184 u32 be_rx_mcast_pkt;
185 /* number of non ether type II frames dropped where
186 * frame len > length field of Mac Hdr */
187 u32 be_802_3_dropped_frames;
188 /* number of non ether type II frames malformed where
189 * in frame len < length field of Mac Hdr */
190 u32 be_802_3_malformed_frames;
191 u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
192 ulong rx_fps_jiffies; /* jiffies at last FPS calc */
193 u32 be_rx_frags;
194 u32 be_prev_rx_frags;
195 u32 be_rx_fps; /* Rx frags per second */
196};
197
198struct be_stats_obj {
199 struct be_drvr_stats drvr_stats;
200 struct be_dma_mem cmd;
201}; 173};
202 174
203struct be_tx_obj { 175struct be_tx_obj {
@@ -215,10 +187,34 @@ struct be_rx_page_info {
215 bool last_page_user; 187 bool last_page_user;
216}; 188};
217 189
190struct be_rx_stats {
191 u32 rx_post_fail;/* number of ethrx buffer alloc failures */
192 u32 rx_polls; /* number of times NAPI called poll function */
193 u32 rx_events; /* number of ucast rx completion events */
194 u32 rx_compl; /* number of rx completion entries processed */
195 ulong rx_jiffies;
196 u64 rx_bytes;
197 u64 rx_bytes_prev;
198 u64 rx_pkts;
199 u32 rx_rate;
200 u32 rx_mcast_pkts;
201 u32 rxcp_err; /* Num rx completion entries w/ err set. */
202 ulong rx_fps_jiffies; /* jiffies at last FPS calc */
203 u32 rx_frags;
204 u32 prev_rx_frags;
205 u32 rx_fps; /* Rx frags per second */
206};
207
218struct be_rx_obj { 208struct be_rx_obj {
209 struct be_adapter *adapter;
219 struct be_queue_info q; 210 struct be_queue_info q;
220 struct be_queue_info cq; 211 struct be_queue_info cq;
221 struct be_rx_page_info page_info_tbl[RX_Q_LEN]; 212 struct be_rx_page_info page_info_tbl[RX_Q_LEN];
213 struct be_eq_obj rx_eq;
214 struct be_rx_stats stats;
215 u8 rss_id;
216 bool rx_post_starved; /* Zero rx frags have been posted to BE */
217 u32 cache_line_barrier[16];
222}; 218};
223 219
224struct be_vf_cfg { 220struct be_vf_cfg {
@@ -229,7 +225,6 @@ struct be_vf_cfg {
229 u32 vf_tx_rate; 225 u32 vf_tx_rate;
230}; 226};
231 227
232#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
233#define BE_INVALID_PMAC_ID 0xffffffff 228#define BE_INVALID_PMAC_ID 0xffffffff
234struct be_adapter { 229struct be_adapter {
235 struct pci_dev *pdev; 230 struct pci_dev *pdev;
@@ -249,29 +244,31 @@ struct be_adapter {
249 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 244 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
250 spinlock_t mcc_cq_lock; 245 spinlock_t mcc_cq_lock;
251 246
252 struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS]; 247 struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
253 bool msix_enabled; 248 bool msix_enabled;
254 bool isr_registered; 249 bool isr_registered;
255 250
256 /* TX Rings */ 251 /* TX Rings */
257 struct be_eq_obj tx_eq; 252 struct be_eq_obj tx_eq;
258 struct be_tx_obj tx_obj; 253 struct be_tx_obj tx_obj;
254 struct be_tx_stats tx_stats;
259 255
260 u32 cache_line_break[8]; 256 u32 cache_line_break[8];
261 257
262 /* Rx rings */ 258 /* Rx rings */
263 struct be_eq_obj rx_eq; 259 struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */
264 struct be_rx_obj rx_obj; 260 u32 num_rx_qs;
265 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 261 u32 big_page_size; /* Compounded page size shared by rx wrbs */
266 bool rx_post_starved; /* Zero rx frags have been posted to BE */
267 262
268 struct vlan_group *vlan_grp; 263 struct vlan_group *vlan_grp;
269 u16 vlans_added; 264 u16 vlans_added;
270 u16 max_vlans; /* Number of vlans supported */ 265 u16 max_vlans; /* Number of vlans supported */
271 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; 266 u8 vlan_tag[VLAN_N_VID];
267 u8 vlan_prio_bmap; /* Available Priority BitMap */
268 u16 recommended_prio; /* Recommended Priority */
272 struct be_dma_mem mc_cmd_mem; 269 struct be_dma_mem mc_cmd_mem;
273 270
274 struct be_stats_obj stats; 271 struct be_dma_mem stats_cmd;
275 /* Work queue used to perform periodic tasks like getting statistics */ 272 /* Work queue used to perform periodic tasks like getting statistics */
276 struct delayed_work work; 273 struct delayed_work work;
277 274
@@ -287,6 +284,7 @@ struct be_adapter {
287 bool promiscuous; 284 bool promiscuous;
288 bool wol; 285 bool wol;
289 u32 function_mode; 286 u32 function_mode;
287 u32 function_caps;
290 u32 rx_fc; /* Rx flow control */ 288 u32 rx_fc; /* Rx flow control */
291 u32 tx_fc; /* Tx flow control */ 289 u32 tx_fc; /* Tx flow control */
292 bool ue_detected; 290 bool ue_detected;
@@ -313,10 +311,20 @@ struct be_adapter {
313 311
314extern const struct ethtool_ops be_ethtool_ops; 312extern const struct ethtool_ops be_ethtool_ops;
315 313
316#define drvr_stats(adapter) (&adapter->stats.drvr_stats) 314#define tx_stats(adapter) (&adapter->tx_stats)
315#define rx_stats(rxo) (&rxo->stats)
317 316
318#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops) 317#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
319 318
319#define for_all_rx_queues(adapter, rxo, i) \
320 for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
321 i++, rxo++)
322
323/* Just skip the first default non-rss queue */
324#define for_all_rss_queues(adapter, rxo, i) \
325 for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
326 i++, rxo++)
327
320#define PAGE_SHIFT_4K 12 328#define PAGE_SHIFT_4K 12
321#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) 329#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
322 330
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 34abcc9403d6..1e7f305ed00b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -71,7 +71,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
71 if (compl_status == MCC_STATUS_SUCCESS) { 71 if (compl_status == MCC_STATUS_SUCCESS) {
72 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) { 72 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
73 struct be_cmd_resp_get_stats *resp = 73 struct be_cmd_resp_get_stats *resp =
74 adapter->stats.cmd.va; 74 adapter->stats_cmd.va;
75 be_dws_le_to_cpu(&resp->hw_stats, 75 be_dws_le_to_cpu(&resp->hw_stats,
76 sizeof(resp->hw_stats)); 76 sizeof(resp->hw_stats));
77 netdev_stats_update(adapter); 77 netdev_stats_update(adapter);
@@ -96,11 +96,62 @@ static void be_async_link_state_process(struct be_adapter *adapter,
96 evt->port_link_status == ASYNC_EVENT_LINK_UP); 96 evt->port_link_status == ASYNC_EVENT_LINK_UP);
97} 97}
98 98
99/* Grp5 CoS Priority evt */
100static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
101 struct be_async_event_grp5_cos_priority *evt)
102{
103 if (evt->valid) {
104 adapter->vlan_prio_bmap = evt->available_priority_bmap;
105 adapter->recommended_prio =
106 evt->reco_default_priority << VLAN_PRIO_SHIFT;
107 }
108}
109
110/* Grp5 QOS Speed evt */
111static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
112 struct be_async_event_grp5_qos_link_speed *evt)
113{
114 if (evt->physical_port == adapter->port_num) {
115 /* qos_link_speed is in units of 10 Mbps */
116 adapter->link_speed = evt->qos_link_speed * 10;
117 }
118}
119
120static void be_async_grp5_evt_process(struct be_adapter *adapter,
121 u32 trailer, struct be_mcc_compl *evt)
122{
123 u8 event_type = 0;
124
125 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
126 ASYNC_TRAILER_EVENT_TYPE_MASK;
127
128 switch (event_type) {
129 case ASYNC_EVENT_COS_PRIORITY:
130 be_async_grp5_cos_priority_process(adapter,
131 (struct be_async_event_grp5_cos_priority *)evt);
132 break;
133 case ASYNC_EVENT_QOS_SPEED:
134 be_async_grp5_qos_speed_process(adapter,
135 (struct be_async_event_grp5_qos_link_speed *)evt);
136 break;
137 default:
138 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
139 break;
140 }
141}
142
99static inline bool is_link_state_evt(u32 trailer) 143static inline bool is_link_state_evt(u32 trailer)
100{ 144{
145 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
146 ASYNC_TRAILER_EVENT_CODE_MASK) ==
147 ASYNC_EVENT_CODE_LINK_STATE;
148}
149
150static inline bool is_grp5_evt(u32 trailer)
151{
101 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & 152 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
102 ASYNC_TRAILER_EVENT_CODE_MASK) == 153 ASYNC_TRAILER_EVENT_CODE_MASK) ==
103 ASYNC_EVENT_CODE_LINK_STATE); 154 ASYNC_EVENT_CODE_GRP_5);
104} 155}
105 156
106static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 157static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -143,6 +194,9 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
143 if (is_link_state_evt(compl->flags)) 194 if (is_link_state_evt(compl->flags))
144 be_async_link_state_process(adapter, 195 be_async_link_state_process(adapter,
145 (struct be_async_event_link_state *) compl); 196 (struct be_async_event_link_state *) compl);
197 else if (is_grp5_evt(compl->flags))
198 be_async_grp5_evt_process(adapter,
199 compl->flags, compl);
146 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 200 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
147 *status = be_mcc_compl_process(adapter, compl); 201 *status = be_mcc_compl_process(adapter, compl);
148 atomic_dec(&mcc_obj->q.used); 202 atomic_dec(&mcc_obj->q.used);
@@ -677,10 +731,10 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
677 ctxt = &req->context; 731 ctxt = &req->context;
678 732
679 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, 733 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
680 OPCODE_COMMON_MCC_CREATE); 734 OPCODE_COMMON_MCC_CREATE_EXT);
681 735
682 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 736 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
683 OPCODE_COMMON_MCC_CREATE, sizeof(*req)); 737 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
684 738
685 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 739 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
686 740
@@ -688,7 +742,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
688 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, 742 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
689 be_encoded_q_len(mccq->len)); 743 be_encoded_q_len(mccq->len));
690 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); 744 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
691 745 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
746 req->async_event_bitmap[0] |= 0x00000022;
692 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 747 be_dws_cpu_to_le(ctxt, sizeof(req->context));
693 748
694 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 749 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -754,7 +809,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
754/* Uses mbox */ 809/* Uses mbox */
755int be_cmd_rxq_create(struct be_adapter *adapter, 810int be_cmd_rxq_create(struct be_adapter *adapter,
756 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 811 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
757 u16 max_frame_size, u32 if_id, u32 rss) 812 u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
758{ 813{
759 struct be_mcc_wrb *wrb; 814 struct be_mcc_wrb *wrb;
760 struct be_cmd_req_eth_rx_create *req; 815 struct be_cmd_req_eth_rx_create *req;
@@ -785,6 +840,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
785 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 840 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
786 rxq->id = le16_to_cpu(resp->id); 841 rxq->id = le16_to_cpu(resp->id);
787 rxq->created = true; 842 rxq->created = true;
843 *rss_id = resp->rss_id;
788 } 844 }
789 845
790 spin_unlock(&adapter->mbox_lock); 846 spin_unlock(&adapter->mbox_lock);
@@ -1259,7 +1315,8 @@ err:
1259} 1315}
1260 1316
1261/* Uses mbox */ 1317/* Uses mbox */
1262int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode) 1318int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1319 u32 *mode, u32 *caps)
1263{ 1320{
1264 struct be_mcc_wrb *wrb; 1321 struct be_mcc_wrb *wrb;
1265 struct be_cmd_req_query_fw_cfg *req; 1322 struct be_cmd_req_query_fw_cfg *req;
@@ -1281,6 +1338,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
1281 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 1338 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1282 *port_num = le32_to_cpu(resp->phys_port); 1339 *port_num = le32_to_cpu(resp->phys_port);
1283 *mode = le32_to_cpu(resp->function_mode); 1340 *mode = le32_to_cpu(resp->function_mode);
1341 *caps = le32_to_cpu(resp->function_caps);
1284 } 1342 }
1285 1343
1286 spin_unlock(&adapter->mbox_lock); 1344 spin_unlock(&adapter->mbox_lock);
@@ -1311,6 +1369,37 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1311 return status; 1369 return status;
1312} 1370}
1313 1371
1372int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1373{
1374 struct be_mcc_wrb *wrb;
1375 struct be_cmd_req_rss_config *req;
1376 u32 myhash[10];
1377 int status;
1378
1379 spin_lock(&adapter->mbox_lock);
1380
1381 wrb = wrb_from_mbox(adapter);
1382 req = embedded_payload(wrb);
1383
1384 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1385 OPCODE_ETH_RSS_CONFIG);
1386
1387 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1388 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1389
1390 req->if_id = cpu_to_le32(adapter->if_handle);
1391 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1392 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1393 memcpy(req->cpu_table, rsstable, table_size);
1394 memcpy(req->hash, myhash, sizeof(myhash));
1395 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1396
1397 status = be_mbox_notify_wait(adapter);
1398
1399 spin_unlock(&adapter->mbox_lock);
1400 return status;
1401}
1402
1314/* Uses sync mcc */ 1403/* Uses sync mcc */
1315int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 1404int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1316 u8 bcn, u8 sts, u8 state) 1405 u8 bcn, u8 sts, u8 state)
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index ad1e6fac60c5..c7f6cdfe1c73 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -82,7 +82,12 @@ struct be_mcc_compl {
82 */ 82 */
83#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ 83#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
84#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF 84#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
85#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
86#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
85#define ASYNC_EVENT_CODE_LINK_STATE 0x1 87#define ASYNC_EVENT_CODE_LINK_STATE 0x1
88#define ASYNC_EVENT_CODE_GRP_5 0x5
89#define ASYNC_EVENT_QOS_SPEED 0x1
90#define ASYNC_EVENT_COS_PRIORITY 0x2
86struct be_async_event_trailer { 91struct be_async_event_trailer {
87 u32 code; 92 u32 code;
88}; 93};
@@ -105,6 +110,30 @@ struct be_async_event_link_state {
105 struct be_async_event_trailer trailer; 110 struct be_async_event_trailer trailer;
106} __packed; 111} __packed;
107 112
113/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
114 * the mcc_compl must be interpreted as follows
115 */
116struct be_async_event_grp5_qos_link_speed {
117 u8 physical_port;
118 u8 rsvd[5];
119 u16 qos_link_speed;
120 u32 event_tag;
121 struct be_async_event_trailer trailer;
122} __packed;
123
124/* When the event code of an async trailer is GRP5 and event type is
125 * CoS-Priority, the mcc_compl must be interpreted as follows
126 */
127struct be_async_event_grp5_cos_priority {
128 u8 physical_port;
129 u8 available_priority_bmap;
130 u8 reco_default_priority;
131 u8 valid;
132 u8 rsvd0;
133 u8 event_tag;
134 struct be_async_event_trailer trailer;
135} __packed;
136
108struct be_mcc_mailbox { 137struct be_mcc_mailbox {
109 struct be_mcc_wrb wrb; 138 struct be_mcc_wrb wrb;
110 struct be_mcc_compl compl; 139 struct be_mcc_compl compl;
@@ -123,8 +152,9 @@ struct be_mcc_mailbox {
123#define OPCODE_COMMON_WRITE_FLASHROM 7 152#define OPCODE_COMMON_WRITE_FLASHROM 7
124#define OPCODE_COMMON_CQ_CREATE 12 153#define OPCODE_COMMON_CQ_CREATE 12
125#define OPCODE_COMMON_EQ_CREATE 13 154#define OPCODE_COMMON_EQ_CREATE 13
126#define OPCODE_COMMON_MCC_CREATE 21 155#define OPCODE_COMMON_MCC_CREATE 21
127#define OPCODE_COMMON_SET_QOS 28 156#define OPCODE_COMMON_SET_QOS 28
157#define OPCODE_COMMON_MCC_CREATE_EXT 90
128#define OPCODE_COMMON_SEEPROM_READ 30 158#define OPCODE_COMMON_SEEPROM_READ 30
129#define OPCODE_COMMON_NTWK_RX_FILTER 34 159#define OPCODE_COMMON_NTWK_RX_FILTER 34
130#define OPCODE_COMMON_GET_FW_VERSION 35 160#define OPCODE_COMMON_GET_FW_VERSION 35
@@ -147,6 +177,7 @@ struct be_mcc_mailbox {
147#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 177#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
148#define OPCODE_COMMON_GET_PHY_DETAILS 102 178#define OPCODE_COMMON_GET_PHY_DETAILS 102
149 179
180#define OPCODE_ETH_RSS_CONFIG 1
150#define OPCODE_ETH_ACPI_CONFIG 2 181#define OPCODE_ETH_ACPI_CONFIG 2
151#define OPCODE_ETH_PROMISCUOUS 3 182#define OPCODE_ETH_PROMISCUOUS 3
152#define OPCODE_ETH_GET_STATISTICS 4 183#define OPCODE_ETH_GET_STATISTICS 4
@@ -337,6 +368,7 @@ struct be_cmd_req_mcc_create {
337 struct be_cmd_req_hdr hdr; 368 struct be_cmd_req_hdr hdr;
338 u16 num_pages; 369 u16 num_pages;
339 u16 rsvd0; 370 u16 rsvd0;
371 u32 async_event_bitmap[1];
340 u8 context[sizeof(struct amap_mcc_context) / 8]; 372 u8 context[sizeof(struct amap_mcc_context) / 8];
341 struct phys_addr pages[8]; 373 struct phys_addr pages[8];
342} __packed; 374} __packed;
@@ -409,7 +441,7 @@ struct be_cmd_req_eth_rx_create {
409struct be_cmd_resp_eth_rx_create { 441struct be_cmd_resp_eth_rx_create {
410 struct be_cmd_resp_hdr hdr; 442 struct be_cmd_resp_hdr hdr;
411 u16 id; 443 u16 id;
412 u8 cpu_id; 444 u8 rss_id;
413 u8 rsvd0; 445 u8 rsvd0;
414} __packed; 446} __packed;
415 447
@@ -739,9 +771,10 @@ struct be_cmd_resp_modify_eq_delay {
739} __packed; 771} __packed;
740 772
741/******************** Get FW Config *******************/ 773/******************** Get FW Config *******************/
774#define BE_FUNCTION_CAPS_RSS 0x2
742struct be_cmd_req_query_fw_cfg { 775struct be_cmd_req_query_fw_cfg {
743 struct be_cmd_req_hdr hdr; 776 struct be_cmd_req_hdr hdr;
744 u32 rsvd[30]; 777 u32 rsvd[31];
745}; 778};
746 779
747struct be_cmd_resp_query_fw_cfg { 780struct be_cmd_resp_query_fw_cfg {
@@ -751,6 +784,26 @@ struct be_cmd_resp_query_fw_cfg {
751 u32 phys_port; 784 u32 phys_port;
752 u32 function_mode; 785 u32 function_mode;
753 u32 rsvd[26]; 786 u32 rsvd[26];
787 u32 function_caps;
788};
789
790/******************** RSS Config *******************/
791/* RSS types */
792#define RSS_ENABLE_NONE 0x0
793#define RSS_ENABLE_IPV4 0x1
794#define RSS_ENABLE_TCP_IPV4 0x2
795#define RSS_ENABLE_IPV6 0x4
796#define RSS_ENABLE_TCP_IPV6 0x8
797
798struct be_cmd_req_rss_config {
799 struct be_cmd_req_hdr hdr;
800 u32 if_id;
801 u16 enable_rss;
802 u16 cpu_table_size_log2;
803 u32 hash[10];
804 u8 cpu_table[128];
805 u8 flush;
806 u8 rsvd0[3];
754}; 807};
755 808
756/******************** Port Beacon ***************************/ 809/******************** Port Beacon ***************************/
@@ -937,7 +990,7 @@ extern int be_cmd_txq_create(struct be_adapter *adapter,
937extern int be_cmd_rxq_create(struct be_adapter *adapter, 990extern int be_cmd_rxq_create(struct be_adapter *adapter,
938 struct be_queue_info *rxq, u16 cq_id, 991 struct be_queue_info *rxq, u16 cq_id,
939 u16 frag_size, u16 max_frame_size, u32 if_id, 992 u16 frag_size, u16 max_frame_size, u32 if_id,
940 u32 rss); 993 u32 rss, u8 *rss_id);
941extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 994extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
942 int type); 995 int type);
943extern int be_cmd_link_status_query(struct be_adapter *adapter, 996extern int be_cmd_link_status_query(struct be_adapter *adapter,
@@ -960,8 +1013,10 @@ extern int be_cmd_set_flow_control(struct be_adapter *adapter,
960extern int be_cmd_get_flow_control(struct be_adapter *adapter, 1013extern int be_cmd_get_flow_control(struct be_adapter *adapter,
961 u32 *tx_fc, u32 *rx_fc); 1014 u32 *tx_fc, u32 *rx_fc);
962extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, 1015extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
963 u32 *port_num, u32 *cap); 1016 u32 *port_num, u32 *function_mode, u32 *function_caps);
964extern int be_cmd_reset_function(struct be_adapter *adapter); 1017extern int be_cmd_reset_function(struct be_adapter *adapter);
1018extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1019 u16 table_size);
965extern int be_process_mcc(struct be_adapter *adapter, int *status); 1020extern int be_process_mcc(struct be_adapter *adapter, int *status);
966extern int be_cmd_set_beacon_state(struct be_adapter *adapter, 1021extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
967 u8 port_num, u8 beacon, u8 status, u8 state); 1022 u8 port_num, u8 beacon, u8 status, u8 state);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index d92063420c25..0f46366ecc48 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,14 +26,16 @@ struct be_ethtool_stat {
26 int offset; 26 int offset;
27}; 27};
28 28
29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT}; 29enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 30#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
31 offsetof(_struct, field) 31 offsetof(_struct, field)
32#define NETSTAT_INFO(field) #field, NETSTAT,\ 32#define NETSTAT_INFO(field) #field, NETSTAT,\
33 FIELDINFO(struct net_device_stats,\ 33 FIELDINFO(struct net_device_stats,\
34 field) 34 field)
35#define DRVSTAT_INFO(field) #field, DRVSTAT,\ 35#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
36 FIELDINFO(struct be_drvr_stats, field) 36 FIELDINFO(struct be_tx_stats, field)
37#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
38 FIELDINFO(struct be_rx_stats, field)
37#define MISCSTAT_INFO(field) #field, MISCSTAT,\ 39#define MISCSTAT_INFO(field) #field, MISCSTAT,\
38 FIELDINFO(struct be_rxf_stats, field) 40 FIELDINFO(struct be_rxf_stats, field)
39#define PORTSTAT_INFO(field) #field, PORTSTAT,\ 41#define PORTSTAT_INFO(field) #field, PORTSTAT,\
@@ -51,21 +53,12 @@ static const struct be_ethtool_stat et_stats[] = {
51 {NETSTAT_INFO(tx_errors)}, 53 {NETSTAT_INFO(tx_errors)},
52 {NETSTAT_INFO(rx_dropped)}, 54 {NETSTAT_INFO(rx_dropped)},
53 {NETSTAT_INFO(tx_dropped)}, 55 {NETSTAT_INFO(tx_dropped)},
54 {DRVSTAT_INFO(be_tx_reqs)}, 56 {DRVSTAT_TX_INFO(be_tx_rate)},
55 {DRVSTAT_INFO(be_tx_stops)}, 57 {DRVSTAT_TX_INFO(be_tx_reqs)},
56 {DRVSTAT_INFO(be_fwd_reqs)}, 58 {DRVSTAT_TX_INFO(be_tx_wrbs)},
57 {DRVSTAT_INFO(be_tx_wrbs)}, 59 {DRVSTAT_TX_INFO(be_tx_stops)},
58 {DRVSTAT_INFO(be_rx_polls)}, 60 {DRVSTAT_TX_INFO(be_tx_events)},
59 {DRVSTAT_INFO(be_tx_events)}, 61 {DRVSTAT_TX_INFO(be_tx_compl)},
60 {DRVSTAT_INFO(be_rx_events)},
61 {DRVSTAT_INFO(be_tx_compl)},
62 {DRVSTAT_INFO(be_rx_compl)},
63 {DRVSTAT_INFO(be_rx_mcast_pkt)},
64 {DRVSTAT_INFO(be_ethrx_post_fail)},
65 {DRVSTAT_INFO(be_802_3_dropped_frames)},
66 {DRVSTAT_INFO(be_802_3_malformed_frames)},
67 {DRVSTAT_INFO(be_tx_rate)},
68 {DRVSTAT_INFO(be_rx_rate)},
69 {PORTSTAT_INFO(rx_unicast_frames)}, 62 {PORTSTAT_INFO(rx_unicast_frames)},
70 {PORTSTAT_INFO(rx_multicast_frames)}, 63 {PORTSTAT_INFO(rx_multicast_frames)},
71 {PORTSTAT_INFO(rx_broadcast_frames)}, 64 {PORTSTAT_INFO(rx_broadcast_frames)},
@@ -106,11 +99,24 @@ static const struct be_ethtool_stat et_stats[] = {
106 {MISCSTAT_INFO(rx_drops_too_many_frags)}, 99 {MISCSTAT_INFO(rx_drops_too_many_frags)},
107 {MISCSTAT_INFO(rx_drops_invalid_ring)}, 100 {MISCSTAT_INFO(rx_drops_invalid_ring)},
108 {MISCSTAT_INFO(forwarded_packets)}, 101 {MISCSTAT_INFO(forwarded_packets)},
109 {MISCSTAT_INFO(rx_drops_mtu)}, 102 {MISCSTAT_INFO(rx_drops_mtu)}
110 {ERXSTAT_INFO(rx_drops_no_fragments)},
111}; 103};
112#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) 104#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
113 105
106/* Stats related to multi RX queues */
107static const struct be_ethtool_stat et_rx_stats[] = {
108 {DRVSTAT_RX_INFO(rx_bytes)},
109 {DRVSTAT_RX_INFO(rx_pkts)},
110 {DRVSTAT_RX_INFO(rx_rate)},
111 {DRVSTAT_RX_INFO(rx_polls)},
112 {DRVSTAT_RX_INFO(rx_events)},
113 {DRVSTAT_RX_INFO(rx_compl)},
114 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
115 {DRVSTAT_RX_INFO(rx_post_fail)},
116 {ERXSTAT_INFO(rx_drops_no_fragments)}
117};
118#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
119
114static const char et_self_tests[][ETH_GSTRING_LEN] = { 120static const char et_self_tests[][ETH_GSTRING_LEN] = {
115 "MAC Loopback test", 121 "MAC Loopback test",
116 "PHY Loopback test", 122 "PHY Loopback test",
@@ -143,7 +149,7 @@ static int
143be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 149be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
144{ 150{
145 struct be_adapter *adapter = netdev_priv(netdev); 151 struct be_adapter *adapter = netdev_priv(netdev);
146 struct be_eq_obj *rx_eq = &adapter->rx_eq; 152 struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
147 struct be_eq_obj *tx_eq = &adapter->tx_eq; 153 struct be_eq_obj *tx_eq = &adapter->tx_eq;
148 154
149 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd; 155 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
@@ -167,25 +173,49 @@ static int
167be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) 173be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
168{ 174{
169 struct be_adapter *adapter = netdev_priv(netdev); 175 struct be_adapter *adapter = netdev_priv(netdev);
170 struct be_eq_obj *rx_eq = &adapter->rx_eq; 176 struct be_rx_obj *rxo;
177 struct be_eq_obj *rx_eq;
171 struct be_eq_obj *tx_eq = &adapter->tx_eq; 178 struct be_eq_obj *tx_eq = &adapter->tx_eq;
172 u32 tx_max, tx_min, tx_cur; 179 u32 tx_max, tx_min, tx_cur;
173 u32 rx_max, rx_min, rx_cur; 180 u32 rx_max, rx_min, rx_cur;
174 int status = 0; 181 int status = 0, i;
175 182
176 if (coalesce->use_adaptive_tx_coalesce == 1) 183 if (coalesce->use_adaptive_tx_coalesce == 1)
177 return -EINVAL; 184 return -EINVAL;
178 185
179 /* if AIC is being turned on now, start with an EQD of 0 */ 186 for_all_rx_queues(adapter, rxo, i) {
180 if (rx_eq->enable_aic == 0 && 187 rx_eq = &rxo->rx_eq;
181 coalesce->use_adaptive_rx_coalesce == 1) { 188
182 rx_eq->cur_eqd = 0; 189 if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
190 rx_eq->cur_eqd = 0;
191 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
192
193 rx_max = coalesce->rx_coalesce_usecs_high;
194 rx_min = coalesce->rx_coalesce_usecs_low;
195 rx_cur = coalesce->rx_coalesce_usecs;
196
197 if (rx_eq->enable_aic) {
198 if (rx_max > BE_MAX_EQD)
199 rx_max = BE_MAX_EQD;
200 if (rx_min > rx_max)
201 rx_min = rx_max;
202 rx_eq->max_eqd = rx_max;
203 rx_eq->min_eqd = rx_min;
204 if (rx_eq->cur_eqd > rx_max)
205 rx_eq->cur_eqd = rx_max;
206 if (rx_eq->cur_eqd < rx_min)
207 rx_eq->cur_eqd = rx_min;
208 } else {
209 if (rx_cur > BE_MAX_EQD)
210 rx_cur = BE_MAX_EQD;
211 if (rx_eq->cur_eqd != rx_cur) {
212 status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
213 rx_cur);
214 if (!status)
215 rx_eq->cur_eqd = rx_cur;
216 }
217 }
183 } 218 }
184 rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
185
186 rx_max = coalesce->rx_coalesce_usecs_high;
187 rx_min = coalesce->rx_coalesce_usecs_low;
188 rx_cur = coalesce->rx_coalesce_usecs;
189 219
190 tx_max = coalesce->tx_coalesce_usecs_high; 220 tx_max = coalesce->tx_coalesce_usecs_high;
191 tx_min = coalesce->tx_coalesce_usecs_low; 221 tx_min = coalesce->tx_coalesce_usecs_low;
@@ -199,27 +229,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
199 tx_eq->cur_eqd = tx_cur; 229 tx_eq->cur_eqd = tx_cur;
200 } 230 }
201 231
202 if (rx_eq->enable_aic) {
203 if (rx_max > BE_MAX_EQD)
204 rx_max = BE_MAX_EQD;
205 if (rx_min > rx_max)
206 rx_min = rx_max;
207 rx_eq->max_eqd = rx_max;
208 rx_eq->min_eqd = rx_min;
209 if (rx_eq->cur_eqd > rx_max)
210 rx_eq->cur_eqd = rx_max;
211 if (rx_eq->cur_eqd < rx_min)
212 rx_eq->cur_eqd = rx_min;
213 } else {
214 if (rx_cur > BE_MAX_EQD)
215 rx_cur = BE_MAX_EQD;
216 if (rx_eq->cur_eqd != rx_cur) {
217 status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
218 rx_cur);
219 if (!status)
220 rx_eq->cur_eqd = rx_cur;
221 }
222 }
223 return 0; 232 return 0;
224} 233}
225 234
@@ -247,32 +256,25 @@ be_get_ethtool_stats(struct net_device *netdev,
247 struct ethtool_stats *stats, uint64_t *data) 256 struct ethtool_stats *stats, uint64_t *data)
248{ 257{
249 struct be_adapter *adapter = netdev_priv(netdev); 258 struct be_adapter *adapter = netdev_priv(netdev);
250 struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats; 259 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
251 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
252 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
253 struct be_port_rxf_stats *port_stats =
254 &rxf_stats->port[adapter->port_num];
255 struct net_device_stats *net_stats = &netdev->stats;
256 struct be_erx_stats *erx_stats = &hw_stats->erx; 260 struct be_erx_stats *erx_stats = &hw_stats->erx;
261 struct be_rx_obj *rxo;
257 void *p = NULL; 262 void *p = NULL;
258 int i; 263 int i, j;
259 264
260 for (i = 0; i < ETHTOOL_STATS_NUM; i++) { 265 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
261 switch (et_stats[i].type) { 266 switch (et_stats[i].type) {
262 case NETSTAT: 267 case NETSTAT:
263 p = net_stats; 268 p = &netdev->stats;
264 break; 269 break;
265 case DRVSTAT: 270 case DRVSTAT_TX:
266 p = drvr_stats; 271 p = &adapter->tx_stats;
267 break; 272 break;
268 case PORTSTAT: 273 case PORTSTAT:
269 p = port_stats; 274 p = &hw_stats->rxf.port[adapter->port_num];
270 break; 275 break;
271 case MISCSTAT: 276 case MISCSTAT:
272 p = rxf_stats; 277 p = &hw_stats->rxf;
273 break;
274 case ERXSTAT: /* Currently only one ERX stat is provided */
275 p = (u32 *)erx_stats + adapter->rx_obj.q.id;
276 break; 278 break;
277 } 279 }
278 280
@@ -280,19 +282,44 @@ be_get_ethtool_stats(struct net_device *netdev,
280 data[i] = (et_stats[i].size == sizeof(u64)) ? 282 data[i] = (et_stats[i].size == sizeof(u64)) ?
281 *(u64 *)p: *(u32 *)p; 283 *(u64 *)p: *(u32 *)p;
282 } 284 }
285
286 for_all_rx_queues(adapter, rxo, j) {
287 for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
288 switch (et_rx_stats[i].type) {
289 case DRVSTAT_RX:
290 p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
291 break;
292 case ERXSTAT:
293 p = (u32 *)erx_stats + rxo->q.id;
294 break;
295 }
296 data[ETHTOOL_STATS_NUM + j * ETHTOOL_RXSTATS_NUM + i] =
297 (et_rx_stats[i].size == sizeof(u64)) ?
298 *(u64 *)p: *(u32 *)p;
299 }
300 }
283} 301}
284 302
285static void 303static void
286be_get_stat_strings(struct net_device *netdev, uint32_t stringset, 304be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
287 uint8_t *data) 305 uint8_t *data)
288{ 306{
289 int i; 307 struct be_adapter *adapter = netdev_priv(netdev);
308 int i, j;
309
290 switch (stringset) { 310 switch (stringset) {
291 case ETH_SS_STATS: 311 case ETH_SS_STATS:
292 for (i = 0; i < ETHTOOL_STATS_NUM; i++) { 312 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
293 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); 313 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
294 data += ETH_GSTRING_LEN; 314 data += ETH_GSTRING_LEN;
295 } 315 }
316 for (i = 0; i < adapter->num_rx_qs; i++) {
317 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
318 sprintf(data, "rxq%d: %s", i,
319 et_rx_stats[j].desc);
320 data += ETH_GSTRING_LEN;
321 }
322 }
296 break; 323 break;
297 case ETH_SS_TEST: 324 case ETH_SS_TEST:
298 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { 325 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
@@ -305,11 +332,14 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
305 332
306static int be_get_sset_count(struct net_device *netdev, int stringset) 333static int be_get_sset_count(struct net_device *netdev, int stringset)
307{ 334{
335 struct be_adapter *adapter = netdev_priv(netdev);
336
308 switch (stringset) { 337 switch (stringset) {
309 case ETH_SS_TEST: 338 case ETH_SS_TEST:
310 return ETHTOOL_TESTS_NUM; 339 return ETHTOOL_TESTS_NUM;
311 case ETH_SS_STATS: 340 case ETH_SS_STATS:
312 return ETHTOOL_STATS_NUM; 341 return ETHTOOL_STATS_NUM +
342 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
313 default: 343 default:
314 return -EINVAL; 344 return -EINVAL;
315 } 345 }
@@ -424,10 +454,10 @@ be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
424{ 454{
425 struct be_adapter *adapter = netdev_priv(netdev); 455 struct be_adapter *adapter = netdev_priv(netdev);
426 456
427 ring->rx_max_pending = adapter->rx_obj.q.len; 457 ring->rx_max_pending = adapter->rx_obj[0].q.len;
428 ring->tx_max_pending = adapter->tx_obj.q.len; 458 ring->tx_max_pending = adapter->tx_obj.q.len;
429 459
430 ring->rx_pending = atomic_read(&adapter->rx_obj.q.used); 460 ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
431 ring->tx_pending = atomic_read(&adapter->tx_obj.q.used); 461 ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
432} 462}
433 463
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 43a3a574e2e0..45b1f6635282 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -32,6 +32,10 @@ module_param(num_vfs, uint, S_IRUGO);
32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34 34
35static bool multi_rxq = true;
36module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
35static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
36 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
37 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -111,6 +115,11 @@ static char *ue_status_hi_desc[] = {
111 "Unknown" 115 "Unknown"
112}; 116};
113 117
118static inline bool be_multi_rxq(struct be_adapter *adapter)
119{
120 return (adapter->num_rx_qs > 1);
121}
122
114static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 123static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
115{ 124{
116 struct be_dma_mem *mem = &q->dma_mem; 125 struct be_dma_mem *mem = &q->dma_mem;
@@ -236,18 +245,27 @@ netdev_addr:
236 245
237void netdev_stats_update(struct be_adapter *adapter) 246void netdev_stats_update(struct be_adapter *adapter)
238{ 247{
239 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va); 248 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
240 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 249 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
241 struct be_port_rxf_stats *port_stats = 250 struct be_port_rxf_stats *port_stats =
242 &rxf_stats->port[adapter->port_num]; 251 &rxf_stats->port[adapter->port_num];
243 struct net_device_stats *dev_stats = &adapter->netdev->stats; 252 struct net_device_stats *dev_stats = &adapter->netdev->stats;
244 struct be_erx_stats *erx_stats = &hw_stats->erx; 253 struct be_erx_stats *erx_stats = &hw_stats->erx;
254 struct be_rx_obj *rxo;
255 int i;
245 256
246 dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts; 257 memset(dev_stats, 0, sizeof(*dev_stats));
247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; 258 for_all_rx_queues(adapter, rxo, i) {
248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; 259 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; 260 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
250 dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt; 261 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
262 /* no space in linux buffers: best possible approximation */
263 dev_stats->rx_dropped +=
264 erx_stats->rx_drops_no_fragments[rxo->q.id];
265 }
266
267 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
268 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
251 269
252 /* bad pkts received */ 270 /* bad pkts received */
253 dev_stats->rx_errors = port_stats->rx_crc_errors + 271 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -264,18 +282,11 @@ void netdev_stats_update(struct be_adapter *adapter)
264 port_stats->rx_ip_checksum_errs + 282 port_stats->rx_ip_checksum_errs +
265 port_stats->rx_udp_checksum_errs; 283 port_stats->rx_udp_checksum_errs;
266 284
267 /* no space in linux buffers: best possible approximation */
268 dev_stats->rx_dropped =
269 erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
270
271 /* detailed rx errors */ 285 /* detailed rx errors */
272 dev_stats->rx_length_errors = port_stats->rx_in_range_errors + 286 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
273 port_stats->rx_out_range_errors + 287 port_stats->rx_out_range_errors +
274 port_stats->rx_frame_too_long; 288 port_stats->rx_frame_too_long;
275 289
276 /* receive ring buffer overflow */
277 dev_stats->rx_over_errors = 0;
278
279 dev_stats->rx_crc_errors = port_stats->rx_crc_errors; 290 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
280 291
281 /* frame alignment errors */ 292 /* frame alignment errors */
@@ -286,23 +297,6 @@ void netdev_stats_update(struct be_adapter *adapter)
286 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow + 297 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
287 port_stats->rx_input_fifo_overflow + 298 port_stats->rx_input_fifo_overflow +
288 rxf_stats->rx_drops_no_pbuf; 299 rxf_stats->rx_drops_no_pbuf;
289 /* receiver missed packetd */
290 dev_stats->rx_missed_errors = 0;
291
292 /* packet transmit problems */
293 dev_stats->tx_errors = 0;
294
295 /* no space available in linux */
296 dev_stats->tx_dropped = 0;
297
298 dev_stats->collisions = 0;
299
300 /* detailed tx_errors */
301 dev_stats->tx_aborted_errors = 0;
302 dev_stats->tx_carrier_errors = 0;
303 dev_stats->tx_fifo_errors = 0;
304 dev_stats->tx_heartbeat_errors = 0;
305 dev_stats->tx_window_errors = 0;
306} 300}
307 301
308void be_link_status_update(struct be_adapter *adapter, bool link_up) 302void be_link_status_update(struct be_adapter *adapter, bool link_up)
@@ -326,10 +320,10 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
326} 320}
327 321
328/* Update the EQ delay n BE based on the RX frags consumed / sec */ 322/* Update the EQ delay n BE based on the RX frags consumed / sec */
329static void be_rx_eqd_update(struct be_adapter *adapter) 323static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
330{ 324{
331 struct be_eq_obj *rx_eq = &adapter->rx_eq; 325 struct be_eq_obj *rx_eq = &rxo->rx_eq;
332 struct be_drvr_stats *stats = &adapter->stats.drvr_stats; 326 struct be_rx_stats *stats = &rxo->stats;
333 ulong now = jiffies; 327 ulong now = jiffies;
334 u32 eqd; 328 u32 eqd;
335 329
@@ -346,12 +340,12 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
346 if ((now - stats->rx_fps_jiffies) < HZ) 340 if ((now - stats->rx_fps_jiffies) < HZ)
347 return; 341 return;
348 342
349 stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) / 343 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
350 ((now - stats->rx_fps_jiffies) / HZ); 344 ((now - stats->rx_fps_jiffies) / HZ);
351 345
352 stats->rx_fps_jiffies = now; 346 stats->rx_fps_jiffies = now;
353 stats->be_prev_rx_frags = stats->be_rx_frags; 347 stats->prev_rx_frags = stats->rx_frags;
354 eqd = stats->be_rx_fps / 110000; 348 eqd = stats->rx_fps / 110000;
355 eqd = eqd << 3; 349 eqd = eqd << 3;
356 if (eqd > rx_eq->max_eqd) 350 if (eqd > rx_eq->max_eqd)
357 eqd = rx_eq->max_eqd; 351 eqd = rx_eq->max_eqd;
@@ -378,7 +372,7 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
378 372
379static void be_tx_rate_update(struct be_adapter *adapter) 373static void be_tx_rate_update(struct be_adapter *adapter)
380{ 374{
381 struct be_drvr_stats *stats = drvr_stats(adapter); 375 struct be_tx_stats *stats = tx_stats(adapter);
382 ulong now = jiffies; 376 ulong now = jiffies;
383 377
384 /* Wrapped around? */ 378 /* Wrapped around? */
@@ -400,7 +394,7 @@ static void be_tx_rate_update(struct be_adapter *adapter)
400static void be_tx_stats_update(struct be_adapter *adapter, 394static void be_tx_stats_update(struct be_adapter *adapter,
401 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) 395 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
402{ 396{
403 struct be_drvr_stats *stats = drvr_stats(adapter); 397 struct be_tx_stats *stats = tx_stats(adapter);
404 stats->be_tx_reqs++; 398 stats->be_tx_reqs++;
405 stats->be_tx_wrbs += wrb_cnt; 399 stats->be_tx_wrbs += wrb_cnt;
406 stats->be_tx_bytes += copied; 400 stats->be_tx_bytes += copied;
@@ -435,9 +429,12 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
435 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK; 429 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
436} 430}
437 431
438static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, 432static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
439 bool vlan, u32 wrb_cnt, u32 len) 433 struct sk_buff *skb, u32 wrb_cnt, u32 len)
440{ 434{
435 u8 vlan_prio = 0;
436 u16 vlan_tag = 0;
437
441 memset(hdr, 0, sizeof(*hdr)); 438 memset(hdr, 0, sizeof(*hdr));
442 439
443 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); 440 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
@@ -455,10 +452,15 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
455 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); 452 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
456 } 453 }
457 454
458 if (vlan && vlan_tx_tag_present(skb)) { 455 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); 456 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
460 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, 457 vlan_tag = vlan_tx_tag_get(skb);
461 hdr, vlan_tx_tag_get(skb)); 458 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
459 /* If vlan priority provided by OS is NOT in available bmap */
460 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
461 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
462 adapter->recommended_prio;
463 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
462 } 464 }
463 465
464 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); 466 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
@@ -538,8 +540,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
538 queue_head_inc(txq); 540 queue_head_inc(txq);
539 } 541 }
540 542
541 wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false, 543 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
542 wrb_cnt, copied);
543 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 544 be_dws_cpu_to_le(hdr, sizeof(*hdr));
544 545
545 return copied; 546 return copied;
@@ -632,7 +633,7 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
632 633
633 if (adapter->vlans_added <= adapter->max_vlans) { 634 if (adapter->vlans_added <= adapter->max_vlans) {
634 /* Construct VLAN Table to give to HW */ 635 /* Construct VLAN Table to give to HW */
635 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 636 for (i = 0; i < VLAN_N_VID; i++) {
636 if (adapter->vlan_tag[i]) { 637 if (adapter->vlan_tag[i]) {
637 vtag[ntags] = cpu_to_le16(i); 638 vtag[ntags] = cpu_to_le16(i);
638 ntags++; 639 ntags++;
@@ -651,14 +652,8 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
651static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp) 652static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
652{ 653{
653 struct be_adapter *adapter = netdev_priv(netdev); 654 struct be_adapter *adapter = netdev_priv(netdev);
654 struct be_eq_obj *rx_eq = &adapter->rx_eq;
655 struct be_eq_obj *tx_eq = &adapter->tx_eq;
656 655
657 be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
658 be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
659 adapter->vlan_grp = grp; 656 adapter->vlan_grp = grp;
660 be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
661 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
662} 657}
663 658
664static void be_vlan_add_vid(struct net_device *netdev, u16 vid) 659static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
@@ -820,40 +815,38 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
820 return status; 815 return status;
821} 816}
822 817
823static void be_rx_rate_update(struct be_adapter *adapter) 818static void be_rx_rate_update(struct be_rx_obj *rxo)
824{ 819{
825 struct be_drvr_stats *stats = drvr_stats(adapter); 820 struct be_rx_stats *stats = &rxo->stats;
826 ulong now = jiffies; 821 ulong now = jiffies;
827 822
828 /* Wrapped around */ 823 /* Wrapped around */
829 if (time_before(now, stats->be_rx_jiffies)) { 824 if (time_before(now, stats->rx_jiffies)) {
830 stats->be_rx_jiffies = now; 825 stats->rx_jiffies = now;
831 return; 826 return;
832 } 827 }
833 828
834 /* Update the rate once in two seconds */ 829 /* Update the rate once in two seconds */
835 if ((now - stats->be_rx_jiffies) < 2 * HZ) 830 if ((now - stats->rx_jiffies) < 2 * HZ)
836 return; 831 return;
837 832
838 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes 833 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
839 - stats->be_rx_bytes_prev, 834 now - stats->rx_jiffies);
840 now - stats->be_rx_jiffies); 835 stats->rx_jiffies = now;
841 stats->be_rx_jiffies = now; 836 stats->rx_bytes_prev = stats->rx_bytes;
842 stats->be_rx_bytes_prev = stats->be_rx_bytes;
843} 837}
844 838
845static void be_rx_stats_update(struct be_adapter *adapter, 839static void be_rx_stats_update(struct be_rx_obj *rxo,
846 u32 pktsize, u16 numfrags, u8 pkt_type) 840 u32 pktsize, u16 numfrags, u8 pkt_type)
847{ 841{
848 struct be_drvr_stats *stats = drvr_stats(adapter); 842 struct be_rx_stats *stats = &rxo->stats;
849
850 stats->be_rx_compl++;
851 stats->be_rx_frags += numfrags;
852 stats->be_rx_bytes += pktsize;
853 stats->be_rx_pkts++;
854 843
844 stats->rx_compl++;
845 stats->rx_frags += numfrags;
846 stats->rx_bytes += pktsize;
847 stats->rx_pkts++;
855 if (pkt_type == BE_MULTICAST_PACKET) 848 if (pkt_type == BE_MULTICAST_PACKET)
856 stats->be_rx_mcast_pkt++; 849 stats->rx_mcast_pkts++;
857} 850}
858 851
859static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 852static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -873,12 +866,14 @@ static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
873} 866}
874 867
875static struct be_rx_page_info * 868static struct be_rx_page_info *
876get_rx_page_info(struct be_adapter *adapter, u16 frag_idx) 869get_rx_page_info(struct be_adapter *adapter,
870 struct be_rx_obj *rxo,
871 u16 frag_idx)
877{ 872{
878 struct be_rx_page_info *rx_page_info; 873 struct be_rx_page_info *rx_page_info;
879 struct be_queue_info *rxq = &adapter->rx_obj.q; 874 struct be_queue_info *rxq = &rxo->q;
880 875
881 rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx]; 876 rx_page_info = &rxo->page_info_tbl[frag_idx];
882 BUG_ON(!rx_page_info->page); 877 BUG_ON(!rx_page_info->page);
883 878
884 if (rx_page_info->last_page_user) { 879 if (rx_page_info->last_page_user) {
@@ -893,9 +888,10 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
893 888
894/* Throwaway the data in the Rx completion */ 889/* Throwaway the data in the Rx completion */
895static void be_rx_compl_discard(struct be_adapter *adapter, 890static void be_rx_compl_discard(struct be_adapter *adapter,
896 struct be_eth_rx_compl *rxcp) 891 struct be_rx_obj *rxo,
892 struct be_eth_rx_compl *rxcp)
897{ 893{
898 struct be_queue_info *rxq = &adapter->rx_obj.q; 894 struct be_queue_info *rxq = &rxo->q;
899 struct be_rx_page_info *page_info; 895 struct be_rx_page_info *page_info;
900 u16 rxq_idx, i, num_rcvd; 896 u16 rxq_idx, i, num_rcvd;
901 897
@@ -903,7 +899,7 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
903 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 899 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
904 900
905 for (i = 0; i < num_rcvd; i++) { 901 for (i = 0; i < num_rcvd; i++) {
906 page_info = get_rx_page_info(adapter, rxq_idx); 902 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
907 put_page(page_info->page); 903 put_page(page_info->page);
908 memset(page_info, 0, sizeof(*page_info)); 904 memset(page_info, 0, sizeof(*page_info));
909 index_inc(&rxq_idx, rxq->len); 905 index_inc(&rxq_idx, rxq->len);
@@ -914,11 +910,11 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
914 * skb_fill_rx_data forms a complete skb for an ether frame 910 * skb_fill_rx_data forms a complete skb for an ether frame
915 * indicated by rxcp. 911 * indicated by rxcp.
916 */ 912 */
917static void skb_fill_rx_data(struct be_adapter *adapter, 913static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
918 struct sk_buff *skb, struct be_eth_rx_compl *rxcp, 914 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
919 u16 num_rcvd) 915 u16 num_rcvd)
920{ 916{
921 struct be_queue_info *rxq = &adapter->rx_obj.q; 917 struct be_queue_info *rxq = &rxo->q;
922 struct be_rx_page_info *page_info; 918 struct be_rx_page_info *page_info;
923 u16 rxq_idx, i, j; 919 u16 rxq_idx, i, j;
924 u32 pktsize, hdr_len, curr_frag_len, size; 920 u32 pktsize, hdr_len, curr_frag_len, size;
@@ -929,7 +925,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
929 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 925 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
930 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); 926 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
931 927
932 page_info = get_rx_page_info(adapter, rxq_idx); 928 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
933 929
934 start = page_address(page_info->page) + page_info->page_offset; 930 start = page_address(page_info->page) + page_info->page_offset;
935 prefetch(start); 931 prefetch(start);
@@ -967,7 +963,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
967 for (i = 1, j = 0; i < num_rcvd; i++) { 963 for (i = 1, j = 0; i < num_rcvd; i++) {
968 size -= curr_frag_len; 964 size -= curr_frag_len;
969 index_inc(&rxq_idx, rxq->len); 965 index_inc(&rxq_idx, rxq->len);
970 page_info = get_rx_page_info(adapter, rxq_idx); 966 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
971 967
972 curr_frag_len = min(size, rx_frag_size); 968 curr_frag_len = min(size, rx_frag_size);
973 969
@@ -993,11 +989,12 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
993 BUG_ON(j > MAX_SKB_FRAGS); 989 BUG_ON(j > MAX_SKB_FRAGS);
994 990
995done: 991done:
996 be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type); 992 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
997} 993}
998 994
999/* Process the RX completion indicated by rxcp when GRO is disabled */ 995/* Process the RX completion indicated by rxcp when GRO is disabled */
1000static void be_rx_compl_process(struct be_adapter *adapter, 996static void be_rx_compl_process(struct be_adapter *adapter,
997 struct be_rx_obj *rxo,
1001 struct be_eth_rx_compl *rxcp) 998 struct be_eth_rx_compl *rxcp)
1002{ 999{
1003 struct sk_buff *skb; 1000 struct sk_buff *skb;
@@ -1014,11 +1011,11 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1014 if (unlikely(!skb)) { 1011 if (unlikely(!skb)) {
1015 if (net_ratelimit()) 1012 if (net_ratelimit())
1016 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 1013 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1017 be_rx_compl_discard(adapter, rxcp); 1014 be_rx_compl_discard(adapter, rxo, rxcp);
1018 return; 1015 return;
1019 } 1016 }
1020 1017
1021 skb_fill_rx_data(adapter, skb, rxcp, num_rcvd); 1018 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1022 1019
1023 if (do_pkt_csum(rxcp, adapter->rx_csum)) 1020 if (do_pkt_csum(rxcp, adapter->rx_csum))
1024 skb_checksum_none_assert(skb); 1021 skb_checksum_none_assert(skb);
@@ -1051,12 +1048,13 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1051 1048
1052/* Process the RX completion indicated by rxcp when GRO is enabled */ 1049/* Process the RX completion indicated by rxcp when GRO is enabled */
1053static void be_rx_compl_process_gro(struct be_adapter *adapter, 1050static void be_rx_compl_process_gro(struct be_adapter *adapter,
1054 struct be_eth_rx_compl *rxcp) 1051 struct be_rx_obj *rxo,
1052 struct be_eth_rx_compl *rxcp)
1055{ 1053{
1056 struct be_rx_page_info *page_info; 1054 struct be_rx_page_info *page_info;
1057 struct sk_buff *skb = NULL; 1055 struct sk_buff *skb = NULL;
1058 struct be_queue_info *rxq = &adapter->rx_obj.q; 1056 struct be_queue_info *rxq = &rxo->q;
1059 struct be_eq_obj *eq_obj = &adapter->rx_eq; 1057 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1060 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1058 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1061 u16 i, rxq_idx = 0, vid, j; 1059 u16 i, rxq_idx = 0, vid, j;
1062 u8 vtm; 1060 u8 vtm;
@@ -1080,13 +1078,13 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1080 1078
1081 skb = napi_get_frags(&eq_obj->napi); 1079 skb = napi_get_frags(&eq_obj->napi);
1082 if (!skb) { 1080 if (!skb) {
1083 be_rx_compl_discard(adapter, rxcp); 1081 be_rx_compl_discard(adapter, rxo, rxcp);
1084 return; 1082 return;
1085 } 1083 }
1086 1084
1087 remaining = pkt_size; 1085 remaining = pkt_size;
1088 for (i = 0, j = -1; i < num_rcvd; i++) { 1086 for (i = 0, j = -1; i < num_rcvd; i++) {
1089 page_info = get_rx_page_info(adapter, rxq_idx); 1087 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1090 1088
1091 curr_frag_len = min(remaining, rx_frag_size); 1089 curr_frag_len = min(remaining, rx_frag_size);
1092 1090
@@ -1127,12 +1125,12 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1127 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1125 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1128 } 1126 }
1129 1127
1130 be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type); 1128 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1131} 1129}
1132 1130
1133static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) 1131static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1134{ 1132{
1135 struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq); 1133 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1136 1134
1137 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) 1135 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1138 return NULL; 1136 return NULL;
@@ -1140,7 +1138,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
1140 rmb(); 1138 rmb();
1141 be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); 1139 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1142 1140
1143 queue_tail_inc(&adapter->rx_obj.cq); 1141 queue_tail_inc(&rxo->cq);
1144 return rxcp; 1142 return rxcp;
1145} 1143}
1146 1144
@@ -1166,22 +1164,23 @@ static inline struct page *be_alloc_pages(u32 size)
1166 * Allocate a page, split it to fragments of size rx_frag_size and post as 1164 * Allocate a page, split it to fragments of size rx_frag_size and post as
1167 * receive buffers to BE 1165 * receive buffers to BE
1168 */ 1166 */
1169static void be_post_rx_frags(struct be_adapter *adapter) 1167static void be_post_rx_frags(struct be_rx_obj *rxo)
1170{ 1168{
1171 struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl; 1169 struct be_adapter *adapter = rxo->adapter;
1170 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1172 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; 1171 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1173 struct be_queue_info *rxq = &adapter->rx_obj.q; 1172 struct be_queue_info *rxq = &rxo->q;
1174 struct page *pagep = NULL; 1173 struct page *pagep = NULL;
1175 struct be_eth_rx_d *rxd; 1174 struct be_eth_rx_d *rxd;
1176 u64 page_dmaaddr = 0, frag_dmaaddr; 1175 u64 page_dmaaddr = 0, frag_dmaaddr;
1177 u32 posted, page_offset = 0; 1176 u32 posted, page_offset = 0;
1178 1177
1179 page_info = &page_info_tbl[rxq->head]; 1178 page_info = &rxo->page_info_tbl[rxq->head];
1180 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { 1179 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1181 if (!pagep) { 1180 if (!pagep) {
1182 pagep = be_alloc_pages(adapter->big_page_size); 1181 pagep = be_alloc_pages(adapter->big_page_size);
1183 if (unlikely(!pagep)) { 1182 if (unlikely(!pagep)) {
1184 drvr_stats(adapter)->be_ethrx_post_fail++; 1183 rxo->stats.rx_post_fail++;
1185 break; 1184 break;
1186 } 1185 }
1187 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, 1186 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
@@ -1220,7 +1219,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
1220 be_rxq_notify(adapter, rxq->id, posted); 1219 be_rxq_notify(adapter, rxq->id, posted);
1221 } else if (atomic_read(&rxq->used) == 0) { 1220 } else if (atomic_read(&rxq->used) == 0) {
1222 /* Let be_worker replenish when memory is available */ 1221 /* Let be_worker replenish when memory is available */
1223 adapter->rx_post_starved = true; 1222 rxo->rx_post_starved = true;
1224 } 1223 }
1225} 1224}
1226 1225
@@ -1323,17 +1322,17 @@ static void be_eq_clean(struct be_adapter *adapter,
1323 be_eq_notify(adapter, eq_obj->q.id, false, true, num); 1322 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1324} 1323}
1325 1324
1326static void be_rx_q_clean(struct be_adapter *adapter) 1325static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1327{ 1326{
1328 struct be_rx_page_info *page_info; 1327 struct be_rx_page_info *page_info;
1329 struct be_queue_info *rxq = &adapter->rx_obj.q; 1328 struct be_queue_info *rxq = &rxo->q;
1330 struct be_queue_info *rx_cq = &adapter->rx_obj.cq; 1329 struct be_queue_info *rx_cq = &rxo->cq;
1331 struct be_eth_rx_compl *rxcp; 1330 struct be_eth_rx_compl *rxcp;
1332 u16 tail; 1331 u16 tail;
1333 1332
1334 /* First cleanup pending rx completions */ 1333 /* First cleanup pending rx completions */
1335 while ((rxcp = be_rx_compl_get(adapter)) != NULL) { 1334 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1336 be_rx_compl_discard(adapter, rxcp); 1335 be_rx_compl_discard(adapter, rxo, rxcp);
1337 be_rx_compl_reset(rxcp); 1336 be_rx_compl_reset(rxcp);
1338 be_cq_notify(adapter, rx_cq->id, true, 1); 1337 be_cq_notify(adapter, rx_cq->id, true, 1);
1339 } 1338 }
@@ -1341,7 +1340,7 @@ static void be_rx_q_clean(struct be_adapter *adapter)
1341 /* Then free posted rx buffer that were not used */ 1340 /* Then free posted rx buffer that were not used */
1342 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len; 1341 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1343 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) { 1342 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1344 page_info = get_rx_page_info(adapter, tail); 1343 page_info = get_rx_page_info(adapter, rxo, tail);
1345 put_page(page_info->page); 1344 put_page(page_info->page);
1346 memset(page_info, 0, sizeof(*page_info)); 1345 memset(page_info, 0, sizeof(*page_info));
1347 } 1346 }
@@ -1519,92 +1518,101 @@ tx_eq_free:
1519static void be_rx_queues_destroy(struct be_adapter *adapter) 1518static void be_rx_queues_destroy(struct be_adapter *adapter)
1520{ 1519{
1521 struct be_queue_info *q; 1520 struct be_queue_info *q;
1522 1521 struct be_rx_obj *rxo;
1523 q = &adapter->rx_obj.q; 1522 int i;
1524 if (q->created) { 1523
1525 be_cmd_q_destroy(adapter, q, QTYPE_RXQ); 1524 for_all_rx_queues(adapter, rxo, i) {
1526 1525 q = &rxo->q;
1527 /* After the rxq is invalidated, wait for a grace time 1526 if (q->created) {
1528 * of 1ms for all dma to end and the flush compl to arrive 1527 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1529 */ 1528 /* After the rxq is invalidated, wait for a grace time
1530 mdelay(1); 1529 * of 1ms for all dma to end and the flush compl to
1531 be_rx_q_clean(adapter); 1530 * arrive
1531 */
1532 mdelay(1);
1533 be_rx_q_clean(adapter, rxo);
1534 }
1535 be_queue_free(adapter, q);
1536
1537 q = &rxo->cq;
1538 if (q->created)
1539 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1540 be_queue_free(adapter, q);
1541
1542 /* Clear any residual events */
1543 q = &rxo->rx_eq.q;
1544 if (q->created) {
1545 be_eq_clean(adapter, &rxo->rx_eq);
1546 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1547 }
1548 be_queue_free(adapter, q);
1532 } 1549 }
1533 be_queue_free(adapter, q);
1534
1535 q = &adapter->rx_obj.cq;
1536 if (q->created)
1537 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1538 be_queue_free(adapter, q);
1539
1540 /* Clear any residual events */
1541 be_eq_clean(adapter, &adapter->rx_eq);
1542
1543 q = &adapter->rx_eq.q;
1544 if (q->created)
1545 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1546 be_queue_free(adapter, q);
1547} 1550}
1548 1551
1549static int be_rx_queues_create(struct be_adapter *adapter) 1552static int be_rx_queues_create(struct be_adapter *adapter)
1550{ 1553{
1551 struct be_queue_info *eq, *q, *cq; 1554 struct be_queue_info *eq, *q, *cq;
1552 int rc; 1555 struct be_rx_obj *rxo;
1556 int rc, i;
1553 1557
1554 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1558 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1555 adapter->rx_eq.max_eqd = BE_MAX_EQD; 1559 for_all_rx_queues(adapter, rxo, i) {
1556 adapter->rx_eq.min_eqd = 0; 1560 rxo->adapter = adapter;
1557 adapter->rx_eq.cur_eqd = 0; 1561 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1558 adapter->rx_eq.enable_aic = true; 1562 rxo->rx_eq.enable_aic = true;
1559 1563
1560 /* Alloc Rx Event queue */ 1564 /* EQ */
1561 eq = &adapter->rx_eq.q; 1565 eq = &rxo->rx_eq.q;
1562 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, 1566 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1563 sizeof(struct be_eq_entry)); 1567 sizeof(struct be_eq_entry));
1564 if (rc) 1568 if (rc)
1565 return rc; 1569 goto err;
1566 1570
1567 /* Ask BE to create Rx Event queue */ 1571 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1568 rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd); 1572 if (rc)
1569 if (rc) 1573 goto err;
1570 goto rx_eq_free; 1574
1571 1575 /* CQ */
1572 /* Alloc RX eth compl queue */ 1576 cq = &rxo->cq;
1573 cq = &adapter->rx_obj.cq; 1577 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1574 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 1578 sizeof(struct be_eth_rx_compl));
1575 sizeof(struct be_eth_rx_compl)); 1579 if (rc)
1576 if (rc) 1580 goto err;
1577 goto rx_eq_destroy; 1581
1578 1582 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579 /* Ask BE to create Rx eth compl queue */ 1583 if (rc)
1580 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); 1584 goto err;
1581 if (rc) 1585
1582 goto rx_cq_free; 1586 /* Rx Q */
1583 1587 q = &rxo->q;
1584 /* Alloc RX eth queue */ 1588 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1585 q = &adapter->rx_obj.q; 1589 sizeof(struct be_eth_rx_d));
1586 rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d)); 1590 if (rc)
1587 if (rc) 1591 goto err;
1588 goto rx_cq_destroy; 1592
1589 1593 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1590 /* Ask BE to create Rx eth queue */ 1594 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1591 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size, 1595 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1592 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false); 1596 if (rc)
1593 if (rc) 1597 goto err;
1594 goto rx_q_free; 1598 }
1599
1600 if (be_multi_rxq(adapter)) {
1601 u8 rsstable[MAX_RSS_QS];
1602
1603 for_all_rss_queues(adapter, rxo, i)
1604 rsstable[i] = rxo->rss_id;
1605
1606 rc = be_cmd_rss_config(adapter, rsstable,
1607 adapter->num_rx_qs - 1);
1608 if (rc)
1609 goto err;
1610 }
1595 1611
1596 return 0; 1612 return 0;
1597rx_q_free: 1613err:
1598 be_queue_free(adapter, q); 1614 be_rx_queues_destroy(adapter);
1599rx_cq_destroy: 1615 return -1;
1600 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1601rx_cq_free:
1602 be_queue_free(adapter, cq);
1603rx_eq_destroy:
1604 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1605rx_eq_free:
1606 be_queue_free(adapter, eq);
1607 return rc;
1608} 1616}
1609 1617
1610/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1618/* There are 8 evt ids per func. Retruns the evt id's bit number */
@@ -1616,24 +1624,31 @@ static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1616static irqreturn_t be_intx(int irq, void *dev) 1624static irqreturn_t be_intx(int irq, void *dev)
1617{ 1625{
1618 struct be_adapter *adapter = dev; 1626 struct be_adapter *adapter = dev;
1619 int isr; 1627 struct be_rx_obj *rxo;
1628 int isr, i;
1620 1629
1621 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1630 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1622 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); 1631 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
1623 if (!isr) 1632 if (!isr)
1624 return IRQ_NONE; 1633 return IRQ_NONE;
1625 1634
1626 event_handle(adapter, &adapter->tx_eq); 1635 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
1627 event_handle(adapter, &adapter->rx_eq); 1636 event_handle(adapter, &adapter->tx_eq);
1637
1638 for_all_rx_queues(adapter, rxo, i) {
1639 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
1640 event_handle(adapter, &rxo->rx_eq);
1641 }
1628 1642
1629 return IRQ_HANDLED; 1643 return IRQ_HANDLED;
1630} 1644}
1631 1645
1632static irqreturn_t be_msix_rx(int irq, void *dev) 1646static irqreturn_t be_msix_rx(int irq, void *dev)
1633{ 1647{
1634 struct be_adapter *adapter = dev; 1648 struct be_rx_obj *rxo = dev;
1649 struct be_adapter *adapter = rxo->adapter;
1635 1650
1636 event_handle(adapter, &adapter->rx_eq); 1651 event_handle(adapter, &rxo->rx_eq);
1637 1652
1638 return IRQ_HANDLED; 1653 return IRQ_HANDLED;
1639} 1654}
@@ -1647,14 +1662,14 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1647 return IRQ_HANDLED; 1662 return IRQ_HANDLED;
1648} 1663}
1649 1664
1650static inline bool do_gro(struct be_adapter *adapter, 1665static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
1651 struct be_eth_rx_compl *rxcp) 1666 struct be_eth_rx_compl *rxcp)
1652{ 1667{
1653 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); 1668 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1654 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); 1669 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1655 1670
1656 if (err) 1671 if (err)
1657 drvr_stats(adapter)->be_rxcp_err++; 1672 rxo->stats.rxcp_err++;
1658 1673
1659 return (tcp_frame && !err) ? true : false; 1674 return (tcp_frame && !err) ? true : false;
1660} 1675}
@@ -1662,29 +1677,29 @@ static inline bool do_gro(struct be_adapter *adapter,
1662int be_poll_rx(struct napi_struct *napi, int budget) 1677int be_poll_rx(struct napi_struct *napi, int budget)
1663{ 1678{
1664 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi); 1679 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1665 struct be_adapter *adapter = 1680 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1666 container_of(rx_eq, struct be_adapter, rx_eq); 1681 struct be_adapter *adapter = rxo->adapter;
1667 struct be_queue_info *rx_cq = &adapter->rx_obj.cq; 1682 struct be_queue_info *rx_cq = &rxo->cq;
1668 struct be_eth_rx_compl *rxcp; 1683 struct be_eth_rx_compl *rxcp;
1669 u32 work_done; 1684 u32 work_done;
1670 1685
1671 adapter->stats.drvr_stats.be_rx_polls++; 1686 rxo->stats.rx_polls++;
1672 for (work_done = 0; work_done < budget; work_done++) { 1687 for (work_done = 0; work_done < budget; work_done++) {
1673 rxcp = be_rx_compl_get(adapter); 1688 rxcp = be_rx_compl_get(rxo);
1674 if (!rxcp) 1689 if (!rxcp)
1675 break; 1690 break;
1676 1691
1677 if (do_gro(adapter, rxcp)) 1692 if (do_gro(adapter, rxo, rxcp))
1678 be_rx_compl_process_gro(adapter, rxcp); 1693 be_rx_compl_process_gro(adapter, rxo, rxcp);
1679 else 1694 else
1680 be_rx_compl_process(adapter, rxcp); 1695 be_rx_compl_process(adapter, rxo, rxcp);
1681 1696
1682 be_rx_compl_reset(rxcp); 1697 be_rx_compl_reset(rxcp);
1683 } 1698 }
1684 1699
1685 /* Refill the queue */ 1700 /* Refill the queue */
1686 if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM) 1701 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1687 be_post_rx_frags(adapter); 1702 be_post_rx_frags(rxo);
1688 1703
1689 /* All consumed */ 1704 /* All consumed */
1690 if (work_done < budget) { 1705 if (work_done < budget) {
@@ -1738,8 +1753,8 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1738 netif_wake_queue(adapter->netdev); 1753 netif_wake_queue(adapter->netdev);
1739 } 1754 }
1740 1755
1741 drvr_stats(adapter)->be_tx_events++; 1756 tx_stats(adapter)->be_tx_events++;
1742 drvr_stats(adapter)->be_tx_compl += tx_compl; 1757 tx_stats(adapter)->be_tx_compl += tx_compl;
1743 } 1758 }
1744 1759
1745 return 1; 1760 return 1;
@@ -1788,20 +1803,24 @@ static void be_worker(struct work_struct *work)
1788{ 1803{
1789 struct be_adapter *adapter = 1804 struct be_adapter *adapter =
1790 container_of(work, struct be_adapter, work.work); 1805 container_of(work, struct be_adapter, work.work);
1806 struct be_rx_obj *rxo;
1807 int i;
1791 1808
1792 if (!adapter->stats_ioctl_sent) 1809 if (!adapter->stats_ioctl_sent)
1793 be_cmd_get_stats(adapter, &adapter->stats.cmd); 1810 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1794
1795 /* Set EQ delay */
1796 be_rx_eqd_update(adapter);
1797 1811
1798 be_tx_rate_update(adapter); 1812 be_tx_rate_update(adapter);
1799 be_rx_rate_update(adapter);
1800 1813
1801 if (adapter->rx_post_starved) { 1814 for_all_rx_queues(adapter, rxo, i) {
1802 adapter->rx_post_starved = false; 1815 be_rx_rate_update(rxo);
1803 be_post_rx_frags(adapter); 1816 be_rx_eqd_update(adapter, rxo);
1817
1818 if (rxo->rx_post_starved) {
1819 rxo->rx_post_starved = false;
1820 be_post_rx_frags(rxo);
1821 }
1804 } 1822 }
1823
1805 if (!adapter->ue_detected) 1824 if (!adapter->ue_detected)
1806 be_detect_dump_ue(adapter); 1825 be_detect_dump_ue(adapter);
1807 1826
@@ -1816,17 +1835,45 @@ static void be_msix_disable(struct be_adapter *adapter)
1816 } 1835 }
1817} 1836}
1818 1837
1838static int be_num_rxqs_get(struct be_adapter *adapter)
1839{
1840 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1841 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1842 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1843 } else {
1844 dev_warn(&adapter->pdev->dev,
1845 "No support for multiple RX queues\n");
1846 return 1;
1847 }
1848}
1849
1819static void be_msix_enable(struct be_adapter *adapter) 1850static void be_msix_enable(struct be_adapter *adapter)
1820{ 1851{
1852#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1821 int i, status; 1853 int i, status;
1822 1854
1823 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++) 1855 adapter->num_rx_qs = be_num_rxqs_get(adapter);
1856
1857 for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1824 adapter->msix_entries[i].entry = i; 1858 adapter->msix_entries[i].entry = i;
1825 1859
1826 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1860 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1827 BE_NUM_MSIX_VECTORS); 1861 adapter->num_rx_qs + 1);
1828 if (status == 0) 1862 if (status == 0) {
1829 adapter->msix_enabled = true; 1863 goto done;
1864 } else if (status >= BE_MIN_MSIX_VECTORS) {
1865 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1866 status) == 0) {
1867 adapter->num_rx_qs = status - 1;
1868 dev_warn(&adapter->pdev->dev,
1869 "Could alloc only %d MSIx vectors. "
1870 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1871 goto done;
1872 }
1873 }
1874 return;
1875done:
1876 adapter->msix_enabled = true;
1830} 1877}
1831 1878
1832static void be_sriov_enable(struct be_adapter *adapter) 1879static void be_sriov_enable(struct be_adapter *adapter)
@@ -1860,38 +1907,50 @@ static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1860 1907
1861static int be_request_irq(struct be_adapter *adapter, 1908static int be_request_irq(struct be_adapter *adapter,
1862 struct be_eq_obj *eq_obj, 1909 struct be_eq_obj *eq_obj,
1863 void *handler, char *desc) 1910 void *handler, char *desc, void *context)
1864{ 1911{
1865 struct net_device *netdev = adapter->netdev; 1912 struct net_device *netdev = adapter->netdev;
1866 int vec; 1913 int vec;
1867 1914
1868 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1915 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1869 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1916 vec = be_msix_vec_get(adapter, eq_obj->q.id);
1870 return request_irq(vec, handler, 0, eq_obj->desc, adapter); 1917 return request_irq(vec, handler, 0, eq_obj->desc, context);
1871} 1918}
1872 1919
1873static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj) 1920static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1921 void *context)
1874{ 1922{
1875 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1923 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1876 free_irq(vec, adapter); 1924 free_irq(vec, context);
1877} 1925}
1878 1926
1879static int be_msix_register(struct be_adapter *adapter) 1927static int be_msix_register(struct be_adapter *adapter)
1880{ 1928{
1881 int status; 1929 struct be_rx_obj *rxo;
1930 int status, i;
1931 char qname[10];
1882 1932
1883 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx"); 1933 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1934 adapter);
1884 if (status) 1935 if (status)
1885 goto err; 1936 goto err;
1886 1937
1887 status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx"); 1938 for_all_rx_queues(adapter, rxo, i) {
1888 if (status) 1939 sprintf(qname, "rxq%d", i);
1889 goto free_tx_irq; 1940 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
1941 qname, rxo);
1942 if (status)
1943 goto err_msix;
1944 }
1890 1945
1891 return 0; 1946 return 0;
1892 1947
1893free_tx_irq: 1948err_msix:
1894 be_free_irq(adapter, &adapter->tx_eq); 1949 be_free_irq(adapter, &adapter->tx_eq, adapter);
1950
1951 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
1952 be_free_irq(adapter, &rxo->rx_eq, rxo);
1953
1895err: 1954err:
1896 dev_warn(&adapter->pdev->dev, 1955 dev_warn(&adapter->pdev->dev,
1897 "MSIX Request IRQ failed - err %d\n", status); 1956 "MSIX Request IRQ failed - err %d\n", status);
@@ -1931,6 +1990,8 @@ done:
1931static void be_irq_unregister(struct be_adapter *adapter) 1990static void be_irq_unregister(struct be_adapter *adapter)
1932{ 1991{
1933 struct net_device *netdev = adapter->netdev; 1992 struct net_device *netdev = adapter->netdev;
1993 struct be_rx_obj *rxo;
1994 int i;
1934 1995
1935 if (!adapter->isr_registered) 1996 if (!adapter->isr_registered)
1936 return; 1997 return;
@@ -1942,8 +2003,11 @@ static void be_irq_unregister(struct be_adapter *adapter)
1942 } 2003 }
1943 2004
1944 /* MSIx */ 2005 /* MSIx */
1945 be_free_irq(adapter, &adapter->tx_eq); 2006 be_free_irq(adapter, &adapter->tx_eq, adapter);
1946 be_free_irq(adapter, &adapter->rx_eq); 2007
2008 for_all_rx_queues(adapter, rxo, i)
2009 be_free_irq(adapter, &rxo->rx_eq, rxo);
2010
1947done: 2011done:
1948 adapter->isr_registered = false; 2012 adapter->isr_registered = false;
1949} 2013}
@@ -1951,9 +2015,9 @@ done:
1951static int be_close(struct net_device *netdev) 2015static int be_close(struct net_device *netdev)
1952{ 2016{
1953 struct be_adapter *adapter = netdev_priv(netdev); 2017 struct be_adapter *adapter = netdev_priv(netdev);
1954 struct be_eq_obj *rx_eq = &adapter->rx_eq; 2018 struct be_rx_obj *rxo;
1955 struct be_eq_obj *tx_eq = &adapter->tx_eq; 2019 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1956 int vec; 2020 int vec, i;
1957 2021
1958 cancel_delayed_work_sync(&adapter->work); 2022 cancel_delayed_work_sync(&adapter->work);
1959 2023
@@ -1968,14 +2032,19 @@ static int be_close(struct net_device *netdev)
1968 if (adapter->msix_enabled) { 2032 if (adapter->msix_enabled) {
1969 vec = be_msix_vec_get(adapter, tx_eq->q.id); 2033 vec = be_msix_vec_get(adapter, tx_eq->q.id);
1970 synchronize_irq(vec); 2034 synchronize_irq(vec);
1971 vec = be_msix_vec_get(adapter, rx_eq->q.id); 2035
1972 synchronize_irq(vec); 2036 for_all_rx_queues(adapter, rxo, i) {
2037 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
2038 synchronize_irq(vec);
2039 }
1973 } else { 2040 } else {
1974 synchronize_irq(netdev->irq); 2041 synchronize_irq(netdev->irq);
1975 } 2042 }
1976 be_irq_unregister(adapter); 2043 be_irq_unregister(adapter);
1977 2044
1978 napi_disable(&rx_eq->napi); 2045 for_all_rx_queues(adapter, rxo, i)
2046 napi_disable(&rxo->rx_eq.napi);
2047
1979 napi_disable(&tx_eq->napi); 2048 napi_disable(&tx_eq->napi);
1980 2049
1981 /* Wait for all pending tx completions to arrive so that 2050 /* Wait for all pending tx completions to arrive so that
@@ -1989,17 +2058,17 @@ static int be_close(struct net_device *netdev)
1989static int be_open(struct net_device *netdev) 2058static int be_open(struct net_device *netdev)
1990{ 2059{
1991 struct be_adapter *adapter = netdev_priv(netdev); 2060 struct be_adapter *adapter = netdev_priv(netdev);
1992 struct be_eq_obj *rx_eq = &adapter->rx_eq;
1993 struct be_eq_obj *tx_eq = &adapter->tx_eq; 2061 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2062 struct be_rx_obj *rxo;
1994 bool link_up; 2063 bool link_up;
1995 int status; 2064 int status, i;
1996 u8 mac_speed; 2065 u8 mac_speed;
1997 u16 link_speed; 2066 u16 link_speed;
1998 2067
1999 /* First time posting */ 2068 for_all_rx_queues(adapter, rxo, i) {
2000 be_post_rx_frags(adapter); 2069 be_post_rx_frags(rxo);
2001 2070 napi_enable(&rxo->rx_eq.napi);
2002 napi_enable(&rx_eq->napi); 2071 }
2003 napi_enable(&tx_eq->napi); 2072 napi_enable(&tx_eq->napi);
2004 2073
2005 be_irq_register(adapter); 2074 be_irq_register(adapter);
@@ -2007,12 +2076,12 @@ static int be_open(struct net_device *netdev)
2007 be_intr_set(adapter, true); 2076 be_intr_set(adapter, true);
2008 2077
2009 /* The evt queues are created in unarmed state; arm them */ 2078 /* The evt queues are created in unarmed state; arm them */
2010 be_eq_notify(adapter, rx_eq->q.id, true, false, 0); 2079 for_all_rx_queues(adapter, rxo, i) {
2080 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2081 be_cq_notify(adapter, rxo->cq.id, true, 0);
2082 }
2011 be_eq_notify(adapter, tx_eq->q.id, true, false, 0); 2083 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2012 2084
2013 /* Rx compl queue may be in unarmed state; rearm it */
2014 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
2015
2016 /* Now that interrupts are on we can process async mcc */ 2085 /* Now that interrupts are on we can process async mcc */
2017 be_async_mcc_enable(adapter); 2086 be_async_mcc_enable(adapter);
2018 2087
@@ -2088,7 +2157,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2088static inline int be_vf_eth_addr_config(struct be_adapter *adapter) 2157static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2089{ 2158{
2090 u32 vf = 0; 2159 u32 vf = 0;
2091 int status; 2160 int status = 0;
2092 u8 mac[ETH_ALEN]; 2161 u8 mac[ETH_ALEN];
2093 2162
2094 be_vf_eth_addr_generate(adapter, mac); 2163 be_vf_eth_addr_generate(adapter, mac);
@@ -2134,6 +2203,11 @@ static int be_setup(struct be_adapter *adapter)
2134 BE_IF_FLAGS_PROMISCUOUS | 2203 BE_IF_FLAGS_PROMISCUOUS |
2135 BE_IF_FLAGS_PASS_L3L4_ERRORS; 2204 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2136 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS; 2205 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2206
2207 if (be_multi_rxq(adapter)) {
2208 cap_flags |= BE_IF_FLAGS_RSS;
2209 en_flags |= BE_IF_FLAGS_RSS;
2210 }
2137 } 2211 }
2138 2212
2139 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2213 status = be_cmd_if_create(adapter, cap_flags, en_flags,
@@ -2455,6 +2529,8 @@ static struct net_device_ops be_netdev_ops = {
2455static void be_netdev_init(struct net_device *netdev) 2529static void be_netdev_init(struct net_device *netdev)
2456{ 2530{
2457 struct be_adapter *adapter = netdev_priv(netdev); 2531 struct be_adapter *adapter = netdev_priv(netdev);
2532 struct be_rx_obj *rxo;
2533 int i;
2458 2534
2459 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2535 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2460 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2536 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
@@ -2476,8 +2552,10 @@ static void be_netdev_init(struct net_device *netdev)
2476 2552
2477 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); 2553 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2478 2554
2479 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx, 2555 for_all_rx_queues(adapter, rxo, i)
2480 BE_NAPI_WEIGHT); 2556 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2557 BE_NAPI_WEIGHT);
2558
2481 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc, 2559 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2482 BE_NAPI_WEIGHT); 2560 BE_NAPI_WEIGHT);
2483 2561
@@ -2611,8 +2689,7 @@ done:
2611 2689
2612static void be_stats_cleanup(struct be_adapter *adapter) 2690static void be_stats_cleanup(struct be_adapter *adapter)
2613{ 2691{
2614 struct be_stats_obj *stats = &adapter->stats; 2692 struct be_dma_mem *cmd = &adapter->stats_cmd;
2615 struct be_dma_mem *cmd = &stats->cmd;
2616 2693
2617 if (cmd->va) 2694 if (cmd->va)
2618 pci_free_consistent(adapter->pdev, cmd->size, 2695 pci_free_consistent(adapter->pdev, cmd->size,
@@ -2621,8 +2698,7 @@ static void be_stats_cleanup(struct be_adapter *adapter)
2621 2698
2622static int be_stats_init(struct be_adapter *adapter) 2699static int be_stats_init(struct be_adapter *adapter)
2623{ 2700{
2624 struct be_stats_obj *stats = &adapter->stats; 2701 struct be_dma_mem *cmd = &adapter->stats_cmd;
2625 struct be_dma_mem *cmd = &stats->cmd;
2626 2702
2627 cmd->size = sizeof(struct be_cmd_req_get_stats); 2703 cmd->size = sizeof(struct be_cmd_req_get_stats);
2628 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2704 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
@@ -2667,8 +2743,8 @@ static int be_get_config(struct be_adapter *adapter)
2667 if (status) 2743 if (status)
2668 return status; 2744 return status;
2669 2745
2670 status = be_cmd_query_fw_cfg(adapter, 2746 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2671 &adapter->port_num, &adapter->function_mode); 2747 &adapter->function_mode, &adapter->function_caps);
2672 if (status) 2748 if (status)
2673 return status; 2749 return status;
2674 2750
@@ -2703,7 +2779,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2703 struct be_adapter *adapter; 2779 struct be_adapter *adapter;
2704 struct net_device *netdev; 2780 struct net_device *netdev;
2705 2781
2706
2707 status = pci_enable_device(pdev); 2782 status = pci_enable_device(pdev);
2708 if (status) 2783 if (status)
2709 goto do_none; 2784 goto do_none;
@@ -2736,11 +2811,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
2736 adapter->pdev = pdev; 2811 adapter->pdev = pdev;
2737 pci_set_drvdata(pdev, adapter); 2812 pci_set_drvdata(pdev, adapter);
2738 adapter->netdev = netdev; 2813 adapter->netdev = netdev;
2739 be_netdev_init(netdev);
2740 SET_NETDEV_DEV(netdev, &pdev->dev); 2814 SET_NETDEV_DEV(netdev, &pdev->dev);
2741 2815
2742 be_msix_enable(adapter);
2743
2744 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2816 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2745 if (!status) { 2817 if (!status) {
2746 netdev->features |= NETIF_F_HIGHDMA; 2818 netdev->features |= NETIF_F_HIGHDMA;
@@ -2784,12 +2856,15 @@ static int __devinit be_probe(struct pci_dev *pdev,
2784 if (status) 2856 if (status)
2785 goto stats_clean; 2857 goto stats_clean;
2786 2858
2859 be_msix_enable(adapter);
2860
2787 INIT_DELAYED_WORK(&adapter->work, be_worker); 2861 INIT_DELAYED_WORK(&adapter->work, be_worker);
2788 2862
2789 status = be_setup(adapter); 2863 status = be_setup(adapter);
2790 if (status) 2864 if (status)
2791 goto stats_clean; 2865 goto msix_disable;
2792 2866
2867 be_netdev_init(netdev);
2793 status = register_netdev(netdev); 2868 status = register_netdev(netdev);
2794 if (status != 0) 2869 if (status != 0)
2795 goto unsetup; 2870 goto unsetup;
@@ -2799,12 +2874,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
2799 2874
2800unsetup: 2875unsetup:
2801 be_clear(adapter); 2876 be_clear(adapter);
2877msix_disable:
2878 be_msix_disable(adapter);
2802stats_clean: 2879stats_clean:
2803 be_stats_cleanup(adapter); 2880 be_stats_cleanup(adapter);
2804ctrl_clean: 2881ctrl_clean:
2805 be_ctrl_cleanup(adapter); 2882 be_ctrl_cleanup(adapter);
2806free_netdev: 2883free_netdev:
2807 be_msix_disable(adapter);
2808 be_sriov_disable(adapter); 2884 be_sriov_disable(adapter);
2809 free_netdev(adapter->netdev); 2885 free_netdev(adapter->netdev);
2810 pci_set_drvdata(pdev, NULL); 2886 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9322699bb31c..a1b8c8b8010b 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1581,7 +1581,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
1581 int i; 1581 int i;
1582 1582
1583 if (bmac_devs == NULL) 1583 if (bmac_devs == NULL)
1584 return (-ENOSYS); 1584 return -ENOSYS;
1585 1585
1586 len += sprintf(buffer, "BMAC counters & registers\n"); 1586 len += sprintf(buffer, "BMAC counters & registers\n");
1587 1587
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index caa45c2185e9..e94e5aa97515 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -65,7 +65,7 @@
65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
66 readl((__ioc)->ioc_regs.hfn_mbox_cmd)) 66 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
67 67
68bool bfa_nw_auto_recover = true; 68static bool bfa_nw_auto_recover = true;
69 69
70/* 70/*
71 * forward declarations 71 * forward declarations
@@ -1276,12 +1276,6 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
1276 bfa_nw_auto_recover = auto_recover; 1276 bfa_nw_auto_recover = auto_recover;
1277} 1277}
1278 1278
1279bool
1280bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
1281{
1282 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
1283}
1284
1285static void 1279static void
1286bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) 1280bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1287{ 1281{
@@ -1514,7 +1508,7 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
1514 return; 1508 return;
1515 } 1509 }
1516 1510
1517 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) 1511 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1518 return; 1512 return;
1519 1513
1520 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); 1514 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index 7f0719e17efc..a73d84ec808c 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -271,7 +271,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
271void bfa_nw_ioc_disable(struct bfa_ioc *ioc); 271void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
272 272
273void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); 273void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
274bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
275 274
276void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); 275void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
277void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, 276void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 462857cbab9b..121cfd6d48b1 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
34static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 34static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
35static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); 35static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
36 36
37struct bfa_ioc_hwif nw_hwif_ct; 37static struct bfa_ioc_hwif nw_hwif_ct;
38 38
39/** 39/**
40 * Called from bfa_ioc_attach() to map asic specific calls. 40 * Called from bfa_ioc_attach() to map asic specific calls.
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_sm.h
index 1d3d975d6f68..46462c49b6f9 100644
--- a/drivers/net/bna/bfa_sm.h
+++ b/drivers/net/bna/bfa_sm.h
@@ -77,7 +77,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
77 ((_fsm)->fsm == (bfa_fsm_t)(_state)) 77 ((_fsm)->fsm == (bfa_fsm_t)(_state))
78 78
79static inline int 79static inline int
80bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm) 80bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
81{ 81{
82 int i = 0; 82 int i = 0;
83 83
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index 6a2b3291c190..df6676bbc84e 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -19,8 +19,7 @@
19#include "bfi_ll.h" 19#include "bfi_ll.h"
20#include "bna_types.h" 20#include "bna_types.h"
21 21
22extern u32 bna_dim_vector[][BNA_BIAS_T_MAX]; 22extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
23extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
24 23
25/** 24/**
26 * 25 *
@@ -344,9 +343,6 @@ do { \
344 * BNA 343 * BNA
345 */ 344 */
346 345
347/* Internal APIs */
348void bna_adv_res_req(struct bna_res_info *res_info);
349
350/* APIs for BNAD */ 346/* APIs for BNAD */
351void bna_res_req(struct bna_res_info *res_info); 347void bna_res_req(struct bna_res_info *res_info);
352void bna_init(struct bna *bna, struct bnad *bnad, 348void bna_init(struct bna *bna, struct bnad *bnad,
@@ -354,7 +350,6 @@ void bna_init(struct bna *bna, struct bnad *bnad,
354 struct bna_res_info *res_info); 350 struct bna_res_info *res_info);
355void bna_uninit(struct bna *bna); 351void bna_uninit(struct bna *bna);
356void bna_stats_get(struct bna *bna); 352void bna_stats_get(struct bna *bna);
357void bna_stats_clr(struct bna *bna);
358void bna_get_perm_mac(struct bna *bna, u8 *mac); 353void bna_get_perm_mac(struct bna *bna, u8 *mac);
359 354
360/* APIs for Rx */ 355/* APIs for Rx */
@@ -376,18 +371,6 @@ void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
376 * DEVICE 371 * DEVICE
377 */ 372 */
378 373
379/* Interanl APIs */
380void bna_adv_device_init(struct bna_device *device, struct bna *bna,
381 struct bna_res_info *res_info);
382
383/* APIs for BNA */
384void bna_device_init(struct bna_device *device, struct bna *bna,
385 struct bna_res_info *res_info);
386void bna_device_uninit(struct bna_device *device);
387void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
388int bna_device_status_get(struct bna_device *device);
389int bna_device_state_get(struct bna_device *device);
390
391/* APIs for BNAD */ 374/* APIs for BNAD */
392void bna_device_enable(struct bna_device *device); 375void bna_device_enable(struct bna_device *device);
393void bna_device_disable(struct bna_device *device, 376void bna_device_disable(struct bna_device *device,
@@ -397,12 +380,6 @@ void bna_device_disable(struct bna_device *device,
397 * MBOX 380 * MBOX
398 */ 381 */
399 382
400/* APIs for DEVICE */
401void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
402void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
403void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
404void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
405
406/* APIs for PORT, TX, RX */ 383/* APIs for PORT, TX, RX */
407void bna_mbox_handler(struct bna *bna, u32 intr_status); 384void bna_mbox_handler(struct bna *bna, u32 intr_status);
408void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe); 385void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
@@ -411,17 +388,6 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
411 * PORT 388 * PORT
412 */ 389 */
413 390
414/* APIs for BNA */
415void bna_port_init(struct bna_port *port, struct bna *bna);
416void bna_port_uninit(struct bna_port *port);
417int bna_port_state_get(struct bna_port *port);
418int bna_llport_state_get(struct bna_llport *llport);
419
420/* APIs for DEVICE */
421void bna_port_start(struct bna_port *port);
422void bna_port_stop(struct bna_port *port);
423void bna_port_fail(struct bna_port *port);
424
425/* API for RX */ 391/* API for RX */
426int bna_port_mtu_get(struct bna_port *port); 392int bna_port_mtu_get(struct bna_port *port);
427void bna_llport_admin_up(struct bna_llport *llport); 393void bna_llport_admin_up(struct bna_llport *llport);
@@ -437,12 +403,6 @@ void bna_port_pause_config(struct bna_port *port,
437void bna_port_mtu_set(struct bna_port *port, int mtu, 403void bna_port_mtu_set(struct bna_port *port, int mtu,
438 void (*cbfn)(struct bnad *, enum bna_cb_status)); 404 void (*cbfn)(struct bnad *, enum bna_cb_status));
439void bna_port_mac_get(struct bna_port *port, mac_t *mac); 405void bna_port_mac_get(struct bna_port *port, mac_t *mac);
440void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
441void bna_port_linkcbfn_set(struct bna_port *port,
442 void (*linkcbfn)(struct bnad *,
443 enum bna_link_status));
444void bna_port_admin_up(struct bna_port *port);
445void bna_port_admin_down(struct bna_port *port);
446 406
447/* Callbacks for TX, RX */ 407/* Callbacks for TX, RX */
448void bna_port_cb_tx_stopped(struct bna_port *port, 408void bna_port_cb_tx_stopped(struct bna_port *port,
@@ -450,11 +410,6 @@ void bna_port_cb_tx_stopped(struct bna_port *port,
450void bna_port_cb_rx_stopped(struct bna_port *port, 410void bna_port_cb_rx_stopped(struct bna_port *port,
451 enum bna_cb_status status); 411 enum bna_cb_status status);
452 412
453/* Callbacks for MBOX */
454void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
455 int status);
456void bna_port_cb_link_down(struct bna_port *port, int status);
457
458/** 413/**
459 * IB 414 * IB
460 */ 415 */
@@ -464,25 +419,10 @@ void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
464 struct bna_res_info *res_info); 419 struct bna_res_info *res_info);
465void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod); 420void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
466 421
467/* APIs for TX, RX */
468struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
469 enum bna_intr_type intr_type, int vector);
470void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
471int bna_ib_reserve_idx(struct bna_ib *ib);
472void bna_ib_release_idx(struct bna_ib *ib, int idx);
473int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
474void bna_ib_start(struct bna_ib *ib);
475void bna_ib_stop(struct bna_ib *ib);
476void bna_ib_fail(struct bna_ib *ib);
477void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
478
479/** 422/**
480 * TX MODULE AND TX 423 * TX MODULE AND TX
481 */ 424 */
482 425
483/* Internal APIs */
484void bna_tx_prio_changed(struct bna_tx *tx, int prio);
485
486/* APIs for BNA */ 426/* APIs for BNA */
487void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, 427void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
488 struct bna_res_info *res_info); 428 struct bna_res_info *res_info);
@@ -508,10 +448,6 @@ void bna_tx_enable(struct bna_tx *tx);
508void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, 448void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
509 void (*cbfn)(void *, struct bna_tx *, 449 void (*cbfn)(void *, struct bna_tx *,
510 enum bna_cb_status)); 450 enum bna_cb_status));
511enum bna_cb_status
512bna_tx_prio_set(struct bna_tx *tx, int prio,
513 void (*cbfn)(struct bnad *, struct bna_tx *,
514 enum bna_cb_status));
515void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); 451void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
516 452
517/** 453/**
@@ -564,35 +500,20 @@ void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
564 void (*cbfn)(void *, struct bna_rx *, 500 void (*cbfn)(void *, struct bna_rx *,
565 enum bna_cb_status)); 501 enum bna_cb_status));
566void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo); 502void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
567void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]); 503void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
568void bna_rx_dim_update(struct bna_ccb *ccb); 504void bna_rx_dim_update(struct bna_ccb *ccb);
569enum bna_cb_status 505enum bna_cb_status
570bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, 506bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
571 void (*cbfn)(struct bnad *, struct bna_rx *, 507 void (*cbfn)(struct bnad *, struct bna_rx *,
572 enum bna_cb_status)); 508 enum bna_cb_status));
573enum bna_cb_status 509enum bna_cb_status
574bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
575 void (*cbfn)(struct bnad *, struct bna_rx *,
576 enum bna_cb_status));
577enum bna_cb_status
578bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
579 void (*cbfn)(struct bnad *, struct bna_rx *,
580 enum bna_cb_status));
581enum bna_cb_status
582bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac, 510bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
583 void (*cbfn)(struct bnad *, struct bna_rx *, 511 void (*cbfn)(struct bnad *, struct bna_rx *,
584 enum bna_cb_status)); 512 enum bna_cb_status));
585enum bna_cb_status 513enum bna_cb_status
586bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
587 void (*cbfn)(struct bnad *, struct bna_rx *,
588 enum bna_cb_status));
589enum bna_cb_status
590bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, 514bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
591 void (*cbfn)(struct bnad *, struct bna_rx *, 515 void (*cbfn)(struct bnad *, struct bna_rx *,
592 enum bna_cb_status)); 516 enum bna_cb_status));
593void bna_rx_mcast_delall(struct bna_rx *rx,
594 void (*cbfn)(struct bnad *, struct bna_rx *,
595 enum bna_cb_status));
596enum bna_cb_status 517enum bna_cb_status
597bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, 518bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
598 enum bna_rxmode bitmask, 519 enum bna_rxmode bitmask,
@@ -601,36 +522,12 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
601void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); 522void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
602void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); 523void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
603void bna_rx_vlanfilter_enable(struct bna_rx *rx); 524void bna_rx_vlanfilter_enable(struct bna_rx *rx);
604void bna_rx_vlanfilter_disable(struct bna_rx *rx);
605void bna_rx_rss_enable(struct bna_rx *rx);
606void bna_rx_rss_disable(struct bna_rx *rx);
607void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
608void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
609 int nvectors);
610void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config, 525void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
611 void (*cbfn)(struct bnad *, struct bna_rx *, 526 void (*cbfn)(struct bnad *, struct bna_rx *,
612 enum bna_cb_status)); 527 enum bna_cb_status));
613void bna_rx_hds_disable(struct bna_rx *rx, 528void bna_rx_hds_disable(struct bna_rx *rx,
614 void (*cbfn)(struct bnad *, struct bna_rx *, 529 void (*cbfn)(struct bnad *, struct bna_rx *,
615 enum bna_cb_status)); 530 enum bna_cb_status));
616void bna_rx_receive_pause(struct bna_rx *rx,
617 void (*cbfn)(struct bnad *, struct bna_rx *,
618 enum bna_cb_status));
619void bna_rx_receive_resume(struct bna_rx *rx,
620 void (*cbfn)(struct bnad *, struct bna_rx *,
621 enum bna_cb_status));
622
623/* RxF APIs for RX */
624void bna_rxf_start(struct bna_rxf *rxf);
625void bna_rxf_stop(struct bna_rxf *rxf);
626void bna_rxf_fail(struct bna_rxf *rxf);
627void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
628 struct bna_rx_config *q_config);
629void bna_rxf_uninit(struct bna_rxf *rxf);
630
631/* Callback from RXF to RX */
632void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
633void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
634 531
635/** 532/**
636 * BNAD 533 * BNAD
@@ -639,7 +536,6 @@ void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
639/* Callbacks for BNA */ 536/* Callbacks for BNA */
640void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, 537void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
641 struct bna_stats *stats); 538 struct bna_stats *stats);
642void bnad_cb_stats_clr(struct bnad *bnad);
643 539
644/* Callbacks for DEVICE */ 540/* Callbacks for DEVICE */
645void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status); 541void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index f3034d6bda58..07b26598546e 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -19,14 +19,54 @@
19#include "bfa_sm.h" 19#include "bfa_sm.h"
20#include "bfa_wc.h" 20#include "bfa_wc.h"
21 21
22static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
23
24static void
25bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
26 int status)
27{
28 int i;
29 u8 prio_map;
30
31 port->llport.link_status = BNA_LINK_UP;
32 if (aen->cee_linkup)
33 port->llport.link_status = BNA_CEE_UP;
34
35 /* Compute the priority */
36 prio_map = aen->prio_map;
37 if (prio_map) {
38 for (i = 0; i < 8; i++) {
39 if ((prio_map >> i) & 0x1)
40 break;
41 }
42 port->priority = i;
43 } else
44 port->priority = 0;
45
46 /* Dispatch events */
47 bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
48 bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
49 port->link_cbfn(port->bna->bnad, port->llport.link_status);
50}
51
52static void
53bna_port_cb_link_down(struct bna_port *port, int status)
54{
55 port->llport.link_status = BNA_LINK_DOWN;
56
57 /* Dispatch events */
58 bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
59 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
60}
61
22/** 62/**
23 * MBOX 63 * MBOX
24 */ 64 */
25static int 65static int
26bna_is_aen(u8 msg_id) 66bna_is_aen(u8 msg_id)
27{ 67{
28 return (msg_id == BFI_LL_I2H_LINK_DOWN_AEN || 68 return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
29 msg_id == BFI_LL_I2H_LINK_UP_AEN); 69 msg_id == BFI_LL_I2H_LINK_UP_AEN;
30} 70}
31 71
32static void 72static void
@@ -96,7 +136,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
96 bna_mbox_aen_callback(bna, msg); 136 bna_mbox_aen_callback(bna, msg);
97} 137}
98 138
99void 139static void
100bna_err_handler(struct bna *bna, u32 intr_status) 140bna_err_handler(struct bna *bna, u32 intr_status)
101{ 141{
102 u32 init_halt; 142 u32 init_halt;
@@ -140,7 +180,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
140 } 180 }
141} 181}
142 182
143void 183static void
144bna_mbox_flush_q(struct bna *bna, struct list_head *q) 184bna_mbox_flush_q(struct bna *bna, struct list_head *q)
145{ 185{
146 struct bna_mbox_qe *mb_qe = NULL; 186 struct bna_mbox_qe *mb_qe = NULL;
@@ -166,18 +206,18 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
166 bna->mbox_mod.state = BNA_MBOX_FREE; 206 bna->mbox_mod.state = BNA_MBOX_FREE;
167} 207}
168 208
169void 209static void
170bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod) 210bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
171{ 211{
172} 212}
173 213
174void 214static void
175bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod) 215bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
176{ 216{
177 bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q); 217 bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
178} 218}
179 219
180void 220static void
181bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna) 221bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
182{ 222{
183 bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna); 223 bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
@@ -187,7 +227,7 @@ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
187 mbox_mod->bna = bna; 227 mbox_mod->bna = bna;
188} 228}
189 229
190void 230static void
191bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod) 231bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
192{ 232{
193 mbox_mod->bna = NULL; 233 mbox_mod->bna = NULL;
@@ -538,7 +578,7 @@ bna_fw_cb_llport_down(void *arg, int status)
538 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN); 578 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
539} 579}
540 580
541void 581static void
542bna_port_cb_llport_stopped(struct bna_port *port, 582bna_port_cb_llport_stopped(struct bna_port *port,
543 enum bna_cb_status status) 583 enum bna_cb_status status)
544{ 584{
@@ -591,7 +631,7 @@ bna_llport_fail(struct bna_llport *llport)
591 bfa_fsm_send_event(llport, LLPORT_E_FAIL); 631 bfa_fsm_send_event(llport, LLPORT_E_FAIL);
592} 632}
593 633
594int 634static int
595bna_llport_state_get(struct bna_llport *llport) 635bna_llport_state_get(struct bna_llport *llport)
596{ 636{
597 return bfa_sm_to_state(llport_sm_table, llport->fsm); 637 return bfa_sm_to_state(llport_sm_table, llport->fsm);
@@ -1109,7 +1149,7 @@ bna_port_cb_chld_stopped(void *arg)
1109 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED); 1149 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
1110} 1150}
1111 1151
1112void 1152static void
1113bna_port_init(struct bna_port *port, struct bna *bna) 1153bna_port_init(struct bna_port *port, struct bna *bna)
1114{ 1154{
1115 port->bna = bna; 1155 port->bna = bna;
@@ -1137,7 +1177,7 @@ bna_port_init(struct bna_port *port, struct bna *bna)
1137 bna_llport_init(&port->llport, bna); 1177 bna_llport_init(&port->llport, bna);
1138} 1178}
1139 1179
1140void 1180static void
1141bna_port_uninit(struct bna_port *port) 1181bna_port_uninit(struct bna_port *port)
1142{ 1182{
1143 bna_llport_uninit(&port->llport); 1183 bna_llport_uninit(&port->llport);
@@ -1147,13 +1187,13 @@ bna_port_uninit(struct bna_port *port)
1147 port->bna = NULL; 1187 port->bna = NULL;
1148} 1188}
1149 1189
1150int 1190static int
1151bna_port_state_get(struct bna_port *port) 1191bna_port_state_get(struct bna_port *port)
1152{ 1192{
1153 return bfa_sm_to_state(port_sm_table, port->fsm); 1193 return bfa_sm_to_state(port_sm_table, port->fsm);
1154} 1194}
1155 1195
1156void 1196static void
1157bna_port_start(struct bna_port *port) 1197bna_port_start(struct bna_port *port)
1158{ 1198{
1159 port->flags |= BNA_PORT_F_DEVICE_READY; 1199 port->flags |= BNA_PORT_F_DEVICE_READY;
@@ -1161,7 +1201,7 @@ bna_port_start(struct bna_port *port)
1161 bfa_fsm_send_event(port, PORT_E_START); 1201 bfa_fsm_send_event(port, PORT_E_START);
1162} 1202}
1163 1203
1164void 1204static void
1165bna_port_stop(struct bna_port *port) 1205bna_port_stop(struct bna_port *port)
1166{ 1206{
1167 port->stop_cbfn = bna_device_cb_port_stopped; 1207 port->stop_cbfn = bna_device_cb_port_stopped;
@@ -1171,7 +1211,7 @@ bna_port_stop(struct bna_port *port)
1171 bfa_fsm_send_event(port, PORT_E_STOP); 1211 bfa_fsm_send_event(port, PORT_E_STOP);
1172} 1212}
1173 1213
1174void 1214static void
1175bna_port_fail(struct bna_port *port) 1215bna_port_fail(struct bna_port *port)
1176{ 1216{
1177 port->flags &= ~BNA_PORT_F_DEVICE_READY; 1217 port->flags &= ~BNA_PORT_F_DEVICE_READY;
@@ -1190,44 +1230,6 @@ bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
1190 bfa_wc_down(&port->chld_stop_wc); 1230 bfa_wc_down(&port->chld_stop_wc);
1191} 1231}
1192 1232
1193void
1194bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
1195 int status)
1196{
1197 int i;
1198 u8 prio_map;
1199
1200 port->llport.link_status = BNA_LINK_UP;
1201 if (aen->cee_linkup)
1202 port->llport.link_status = BNA_CEE_UP;
1203
1204 /* Compute the priority */
1205 prio_map = aen->prio_map;
1206 if (prio_map) {
1207 for (i = 0; i < 8; i++) {
1208 if ((prio_map >> i) & 0x1)
1209 break;
1210 }
1211 port->priority = i;
1212 } else
1213 port->priority = 0;
1214
1215 /* Dispatch events */
1216 bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
1217 bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
1218 port->link_cbfn(port->bna->bnad, port->llport.link_status);
1219}
1220
1221void
1222bna_port_cb_link_down(struct bna_port *port, int status)
1223{
1224 port->llport.link_status = BNA_LINK_DOWN;
1225
1226 /* Dispatch events */
1227 bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
1228 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
1229}
1230
1231int 1233int
1232bna_port_mtu_get(struct bna_port *port) 1234bna_port_mtu_get(struct bna_port *port)
1233{ 1235{
@@ -1293,54 +1295,6 @@ bna_port_mac_get(struct bna_port *port, mac_t *mac)
1293} 1295}
1294 1296
1295/** 1297/**
1296 * Should be called only when port is disabled
1297 */
1298void
1299bna_port_type_set(struct bna_port *port, enum bna_port_type type)
1300{
1301 port->type = type;
1302 port->llport.type = type;
1303}
1304
1305/**
1306 * Should be called only when port is disabled
1307 */
1308void
1309bna_port_linkcbfn_set(struct bna_port *port,
1310 void (*linkcbfn)(struct bnad *, enum bna_link_status))
1311{
1312 port->link_cbfn = linkcbfn;
1313}
1314
1315void
1316bna_port_admin_up(struct bna_port *port)
1317{
1318 struct bna_llport *llport = &port->llport;
1319
1320 if (llport->flags & BNA_LLPORT_F_ENABLED)
1321 return;
1322
1323 llport->flags |= BNA_LLPORT_F_ENABLED;
1324
1325 if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
1326 bfa_fsm_send_event(llport, LLPORT_E_UP);
1327}
1328
1329void
1330bna_port_admin_down(struct bna_port *port)
1331{
1332 struct bna_llport *llport = &port->llport;
1333
1334 if (!(llport->flags & BNA_LLPORT_F_ENABLED))
1335 return;
1336
1337 llport->flags &= ~BNA_LLPORT_F_ENABLED;
1338
1339 if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
1340 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
1341}
1342
1343/**
1344 * DEVICE 1298 * DEVICE
1345 */ 1299 */
1346#define enable_mbox_intr(_device)\ 1300#define enable_mbox_intr(_device)\
@@ -1357,7 +1311,7 @@ do {\
1357 bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\ 1311 bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
1358} while (0) 1312} while (0)
1359 1313
1360const struct bna_chip_regs_offset reg_offset[] = 1314static const struct bna_chip_regs_offset reg_offset[] =
1361{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS, 1315{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
1362 HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0}, 1316 HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
1363{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS, 1317{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
@@ -1642,7 +1596,34 @@ static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
1642 bna_device_cb_iocll_reset 1596 bna_device_cb_iocll_reset
1643}; 1597};
1644 1598
1645void 1599/* device */
1600static void
1601bna_adv_device_init(struct bna_device *device, struct bna *bna,
1602 struct bna_res_info *res_info)
1603{
1604 u8 *kva;
1605 u64 dma;
1606
1607 device->bna = bna;
1608
1609 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1610
1611 /**
1612 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1613 * DMA memory.
1614 */
1615 BNA_GET_DMA_ADDR(
1616 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1617 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1618
1619 bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
1620 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1621 kva += bfa_nw_cee_meminfo();
1622 dma += bfa_nw_cee_meminfo();
1623
1624}
1625
1626static void
1646bna_device_init(struct bna_device *device, struct bna *bna, 1627bna_device_init(struct bna_device *device, struct bna *bna,
1647 struct bna_res_info *res_info) 1628 struct bna_res_info *res_info)
1648{ 1629{
@@ -1681,7 +1662,7 @@ bna_device_init(struct bna_device *device, struct bna *bna,
1681 bfa_fsm_set_state(device, bna_device_sm_stopped); 1662 bfa_fsm_set_state(device, bna_device_sm_stopped);
1682} 1663}
1683 1664
1684void 1665static void
1685bna_device_uninit(struct bna_device *device) 1666bna_device_uninit(struct bna_device *device)
1686{ 1667{
1687 bna_mbox_mod_uninit(&device->bna->mbox_mod); 1668 bna_mbox_mod_uninit(&device->bna->mbox_mod);
@@ -1691,7 +1672,7 @@ bna_device_uninit(struct bna_device *device)
1691 device->bna = NULL; 1672 device->bna = NULL;
1692} 1673}
1693 1674
1694void 1675static void
1695bna_device_cb_port_stopped(void *arg, enum bna_cb_status status) 1676bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
1696{ 1677{
1697 struct bna_device *device = (struct bna_device *)arg; 1678 struct bna_device *device = (struct bna_device *)arg;
@@ -1699,10 +1680,10 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
1699 bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED); 1680 bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
1700} 1681}
1701 1682
1702int 1683static int
1703bna_device_status_get(struct bna_device *device) 1684bna_device_status_get(struct bna_device *device)
1704{ 1685{
1705 return (device->fsm == (bfa_fsm_t)bna_device_sm_ready); 1686 return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
1706} 1687}
1707 1688
1708void 1689void
@@ -1733,24 +1714,13 @@ bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
1733 bfa_fsm_send_event(device, DEVICE_E_DISABLE); 1714 bfa_fsm_send_event(device, DEVICE_E_DISABLE);
1734} 1715}
1735 1716
1736int 1717static int
1737bna_device_state_get(struct bna_device *device) 1718bna_device_state_get(struct bna_device *device)
1738{ 1719{
1739 return bfa_sm_to_state(device_sm_table, device->fsm); 1720 return bfa_sm_to_state(device_sm_table, device->fsm);
1740} 1721}
1741 1722
1742u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 1723const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1743 {12, 20},
1744 {10, 18},
1745 {8, 16},
1746 {6, 12},
1747 {4, 8},
1748 {3, 6},
1749 {2, 4},
1750 {1, 2},
1751};
1752
1753u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1754 {12, 12}, 1724 {12, 12},
1755 {6, 10}, 1725 {6, 10},
1756 {5, 10}, 1726 {5, 10},
@@ -1761,36 +1731,9 @@ u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
1761 {1, 2}, 1731 {1, 2},
1762}; 1732};
1763 1733
1764/* device */
1765void
1766bna_adv_device_init(struct bna_device *device, struct bna *bna,
1767 struct bna_res_info *res_info)
1768{
1769 u8 *kva;
1770 u64 dma;
1771
1772 device->bna = bna;
1773
1774 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
1775
1776 /**
1777 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1778 * DMA memory.
1779 */
1780 BNA_GET_DMA_ADDR(
1781 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
1782 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
1783
1784 bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
1785 bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
1786 kva += bfa_nw_cee_meminfo();
1787 dma += bfa_nw_cee_meminfo();
1788
1789}
1790
1791/* utils */ 1734/* utils */
1792 1735
1793void 1736static void
1794bna_adv_res_req(struct bna_res_info *res_info) 1737bna_adv_res_req(struct bna_res_info *res_info)
1795{ 1738{
1796 /* DMA memory for COMMON_MODULE */ 1739 /* DMA memory for COMMON_MODULE */
@@ -2044,36 +1987,6 @@ bna_fw_stats_get(struct bna *bna)
2044 bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1]; 1987 bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
2045} 1988}
2046 1989
2047static void
2048bna_fw_cb_stats_clr(void *arg, int status)
2049{
2050 struct bna *bna = (struct bna *)arg;
2051
2052 bfa_q_qe_init(&bna->mbox_qe.qe);
2053
2054 memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
2055 memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
2056
2057 bnad_cb_stats_clr(bna->bnad);
2058}
2059
2060static void
2061bna_fw_stats_clr(struct bna *bna)
2062{
2063 struct bfi_ll_stats_req ll_req;
2064
2065 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
2066 ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
2067 ll_req.rxf_id_mask[0] = htonl(0xffffffff);
2068 ll_req.rxf_id_mask[1] = htonl(0xffffffff);
2069 ll_req.txf_id_mask[0] = htonl(0xffffffff);
2070 ll_req.txf_id_mask[1] = htonl(0xffffffff);
2071
2072 bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
2073 bna_fw_cb_stats_clr, bna);
2074 bna_mbox_send(bna, &bna->mbox_qe);
2075}
2076
2077void 1990void
2078bna_stats_get(struct bna *bna) 1991bna_stats_get(struct bna *bna)
2079{ 1992{
@@ -2083,22 +1996,8 @@ bna_stats_get(struct bna *bna)
2083 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); 1996 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2084} 1997}
2085 1998
2086void
2087bna_stats_clr(struct bna *bna)
2088{
2089 if (bna_device_status_get(&bna->device))
2090 bna_fw_stats_clr(bna);
2091 else {
2092 memset(&bna->stats.sw_stats, 0,
2093 sizeof(struct bna_sw_stats));
2094 memset(bna->stats.hw_stats, 0,
2095 sizeof(struct bfi_ll_stats));
2096 bnad_cb_stats_clr(bna->bnad);
2097 }
2098}
2099
2100/* IB */ 1999/* IB */
2101void 2000static void
2102bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) 2001bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
2103{ 2002{
2104 ib->ib_config.coalescing_timeo = coalescing_timeo; 2003 ib->ib_config.coalescing_timeo = coalescing_timeo;
@@ -2157,7 +2056,7 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
2157 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); 2056 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
2158} 2057}
2159 2058
2160void 2059static void
2161__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status) 2060__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
2162{ 2061{
2163 struct bna_rx_fndb_ram *rx_fndb_ram; 2062 struct bna_rx_fndb_ram *rx_fndb_ram;
@@ -2553,7 +2452,7 @@ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
2553 * 0 = no h/w change 2452 * 0 = no h/w change
2554 * 1 = need h/w change 2453 * 1 = need h/w change
2555 */ 2454 */
2556int 2455static int
2557rxf_promisc_enable(struct bna_rxf *rxf) 2456rxf_promisc_enable(struct bna_rxf *rxf)
2558{ 2457{
2559 struct bna *bna = rxf->rx->bna; 2458 struct bna *bna = rxf->rx->bna;
@@ -2584,7 +2483,7 @@ rxf_promisc_enable(struct bna_rxf *rxf)
2584 * 0 = no h/w change 2483 * 0 = no h/w change
2585 * 1 = need h/w change 2484 * 1 = need h/w change
2586 */ 2485 */
2587int 2486static int
2588rxf_promisc_disable(struct bna_rxf *rxf) 2487rxf_promisc_disable(struct bna_rxf *rxf)
2589{ 2488{
2590 struct bna *bna = rxf->rx->bna; 2489 struct bna *bna = rxf->rx->bna;
@@ -2623,7 +2522,7 @@ rxf_promisc_disable(struct bna_rxf *rxf)
2623 * 0 = no h/w change 2522 * 0 = no h/w change
2624 * 1 = need h/w change 2523 * 1 = need h/w change
2625 */ 2524 */
2626int 2525static int
2627rxf_default_enable(struct bna_rxf *rxf) 2526rxf_default_enable(struct bna_rxf *rxf)
2628{ 2527{
2629 struct bna *bna = rxf->rx->bna; 2528 struct bna *bna = rxf->rx->bna;
@@ -2654,7 +2553,7 @@ rxf_default_enable(struct bna_rxf *rxf)
2654 * 0 = no h/w change 2553 * 0 = no h/w change
2655 * 1 = need h/w change 2554 * 1 = need h/w change
2656 */ 2555 */
2657int 2556static int
2658rxf_default_disable(struct bna_rxf *rxf) 2557rxf_default_disable(struct bna_rxf *rxf)
2659{ 2558{
2660 struct bna *bna = rxf->rx->bna; 2559 struct bna *bna = rxf->rx->bna;
@@ -2693,7 +2592,7 @@ rxf_default_disable(struct bna_rxf *rxf)
2693 * 0 = no h/w change 2592 * 0 = no h/w change
2694 * 1 = need h/w change 2593 * 1 = need h/w change
2695 */ 2594 */
2696int 2595static int
2697rxf_allmulti_enable(struct bna_rxf *rxf) 2596rxf_allmulti_enable(struct bna_rxf *rxf)
2698{ 2597{
2699 int ret = 0; 2598 int ret = 0;
@@ -2721,7 +2620,7 @@ rxf_allmulti_enable(struct bna_rxf *rxf)
2721 * 0 = no h/w change 2620 * 0 = no h/w change
2722 * 1 = need h/w change 2621 * 1 = need h/w change
2723 */ 2622 */
2724int 2623static int
2725rxf_allmulti_disable(struct bna_rxf *rxf) 2624rxf_allmulti_disable(struct bna_rxf *rxf)
2726{ 2625{
2727 int ret = 0; 2626 int ret = 0;
@@ -2746,159 +2645,6 @@ rxf_allmulti_disable(struct bna_rxf *rxf)
2746} 2645}
2747 2646
2748/* RxF <- bnad */ 2647/* RxF <- bnad */
2749void
2750bna_rx_mcast_delall(struct bna_rx *rx,
2751 void (*cbfn)(struct bnad *, struct bna_rx *,
2752 enum bna_cb_status))
2753{
2754 struct bna_rxf *rxf = &rx->rxf;
2755 struct list_head *qe;
2756 struct bna_mac *mac;
2757 int need_hw_config = 0;
2758
2759 /* Purge all entries from pending_add_q */
2760 while (!list_empty(&rxf->mcast_pending_add_q)) {
2761 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
2762 mac = (struct bna_mac *)qe;
2763 bfa_q_qe_init(&mac->qe);
2764 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
2765 }
2766
2767 /* Schedule all entries in active_q for deletion */
2768 while (!list_empty(&rxf->mcast_active_q)) {
2769 bfa_q_deq(&rxf->mcast_active_q, &qe);
2770 mac = (struct bna_mac *)qe;
2771 bfa_q_qe_init(&mac->qe);
2772 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
2773 need_hw_config = 1;
2774 }
2775
2776 if (need_hw_config) {
2777 rxf->cam_fltr_cbfn = cbfn;
2778 rxf->cam_fltr_cbarg = rx->bna->bnad;
2779 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2780 return;
2781 }
2782
2783 if (cbfn)
2784 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2785}
2786
2787/* RxF <- Rx */
2788void
2789bna_rx_receive_resume(struct bna_rx *rx,
2790 void (*cbfn)(struct bnad *, struct bna_rx *,
2791 enum bna_cb_status))
2792{
2793 struct bna_rxf *rxf = &rx->rxf;
2794
2795 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
2796 rxf->oper_state_cbfn = cbfn;
2797 rxf->oper_state_cbarg = rx->bna->bnad;
2798 bfa_fsm_send_event(rxf, RXF_E_RESUME);
2799 } else if (cbfn)
2800 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2801}
2802
2803void
2804bna_rx_receive_pause(struct bna_rx *rx,
2805 void (*cbfn)(struct bnad *, struct bna_rx *,
2806 enum bna_cb_status))
2807{
2808 struct bna_rxf *rxf = &rx->rxf;
2809
2810 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
2811 rxf->oper_state_cbfn = cbfn;
2812 rxf->oper_state_cbarg = rx->bna->bnad;
2813 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
2814 } else if (cbfn)
2815 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2816}
2817
2818/* RxF <- bnad */
2819enum bna_cb_status
2820bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
2821 void (*cbfn)(struct bnad *, struct bna_rx *,
2822 enum bna_cb_status))
2823{
2824 struct bna_rxf *rxf = &rx->rxf;
2825 struct list_head *qe;
2826 struct bna_mac *mac;
2827
2828 /* Check if already added */
2829 list_for_each(qe, &rxf->ucast_active_q) {
2830 mac = (struct bna_mac *)qe;
2831 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2832 if (cbfn)
2833 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2834 return BNA_CB_SUCCESS;
2835 }
2836 }
2837
2838 /* Check if pending addition */
2839 list_for_each(qe, &rxf->ucast_pending_add_q) {
2840 mac = (struct bna_mac *)qe;
2841 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2842 if (cbfn)
2843 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2844 return BNA_CB_SUCCESS;
2845 }
2846 }
2847
2848 mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
2849 if (mac == NULL)
2850 return BNA_CB_UCAST_CAM_FULL;
2851 bfa_q_qe_init(&mac->qe);
2852 memcpy(mac->addr, addr, ETH_ALEN);
2853 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
2854
2855 rxf->cam_fltr_cbfn = cbfn;
2856 rxf->cam_fltr_cbarg = rx->bna->bnad;
2857
2858 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2859
2860 return BNA_CB_SUCCESS;
2861}
2862
2863/* RxF <- bnad */
2864enum bna_cb_status
2865bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
2866 void (*cbfn)(struct bnad *, struct bna_rx *,
2867 enum bna_cb_status))
2868{
2869 struct bna_rxf *rxf = &rx->rxf;
2870 struct list_head *qe;
2871 struct bna_mac *mac;
2872
2873 list_for_each(qe, &rxf->ucast_pending_add_q) {
2874 mac = (struct bna_mac *)qe;
2875 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2876 list_del(qe);
2877 bfa_q_qe_init(qe);
2878 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
2879 if (cbfn)
2880 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
2881 return BNA_CB_SUCCESS;
2882 }
2883 }
2884
2885 list_for_each(qe, &rxf->ucast_active_q) {
2886 mac = (struct bna_mac *)qe;
2887 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
2888 list_del(qe);
2889 bfa_q_qe_init(qe);
2890 list_add_tail(qe, &rxf->ucast_pending_del_q);
2891 rxf->cam_fltr_cbfn = cbfn;
2892 rxf->cam_fltr_cbarg = rx->bna->bnad;
2893 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2894 return BNA_CB_SUCCESS;
2895 }
2896 }
2897
2898 return BNA_CB_INVALID_MAC;
2899}
2900
2901/* RxF <- bnad */
2902enum bna_cb_status 2648enum bna_cb_status
2903bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, 2649bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2904 enum bna_rxmode bitmask, 2650 enum bna_rxmode bitmask,
@@ -2978,39 +2724,6 @@ err_return:
2978 return BNA_CB_FAIL; 2724 return BNA_CB_FAIL;
2979} 2725}
2980 2726
2981/* RxF <- bnad */
2982void
2983bna_rx_rss_enable(struct bna_rx *rx)
2984{
2985 struct bna_rxf *rxf = &rx->rxf;
2986
2987 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
2988 rxf->rss_status = BNA_STATUS_T_ENABLED;
2989 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
2990}
2991
2992/* RxF <- bnad */
2993void
2994bna_rx_rss_disable(struct bna_rx *rx)
2995{
2996 struct bna_rxf *rxf = &rx->rxf;
2997
2998 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
2999 rxf->rss_status = BNA_STATUS_T_DISABLED;
3000 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3001}
3002
3003/* RxF <- bnad */
3004void
3005bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
3006{
3007 struct bna_rxf *rxf = &rx->rxf;
3008 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
3009 rxf->rss_status = BNA_STATUS_T_ENABLED;
3010 rxf->rss_cfg = *rss_config;
3011 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3012}
3013
3014void 2727void
3015/* RxF <- bnad */ 2728/* RxF <- bnad */
3016bna_rx_vlanfilter_enable(struct bna_rx *rx) 2729bna_rx_vlanfilter_enable(struct bna_rx *rx)
@@ -3024,68 +2737,8 @@ bna_rx_vlanfilter_enable(struct bna_rx *rx)
3024 } 2737 }
3025} 2738}
3026 2739
3027/* RxF <- bnad */
3028void
3029bna_rx_vlanfilter_disable(struct bna_rx *rx)
3030{
3031 struct bna_rxf *rxf = &rx->rxf;
3032
3033 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
3034 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
3035 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
3036 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
3037 }
3038}
3039
3040/* Rx */ 2740/* Rx */
3041 2741
3042struct bna_rxp *
3043bna_rx_get_rxp(struct bna_rx *rx, int vector)
3044{
3045 struct bna_rxp *rxp;
3046 struct list_head *qe;
3047
3048 list_for_each(qe, &rx->rxp_q) {
3049 rxp = (struct bna_rxp *)qe;
3050 if (rxp->vector == vector)
3051 return rxp;
3052 }
3053 return NULL;
3054}
3055
3056/*
3057 * bna_rx_rss_rit_set()
3058 * Sets the Q ids for the specified msi-x vectors in the RIT.
3059 * Maximum rit size supported is 64, which should be the max size of the
3060 * vectors array.
3061 */
3062
3063void
3064bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
3065{
3066 int i;
3067 struct bna_rxp *rxp;
3068 struct bna_rxq *q0 = NULL, *q1 = NULL;
3069 struct bna *bna;
3070 struct bna_rxf *rxf;
3071
3072 /* Build the RIT contents for this RX */
3073 bna = rx->bna;
3074
3075 rxf = &rx->rxf;
3076 for (i = 0; i < nvectors; i++) {
3077 rxp = bna_rx_get_rxp(rx, vectors[i]);
3078
3079 GET_RXQS(rxp, q0, q1);
3080 rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
3081 rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
3082 }
3083
3084 rxf->rit_segment->rit_size = nvectors;
3085
3086 /* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
3087}
3088
3089/* Rx <- bnad */ 2742/* Rx <- bnad */
3090void 2743void
3091bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) 2744bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
@@ -3102,7 +2755,7 @@ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
3102 2755
3103/* Rx <- bnad */ 2756/* Rx <- bnad */
3104void 2757void
3105bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]) 2758bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
3106{ 2759{
3107 int i, j; 2760 int i, j;
3108 2761
@@ -3165,22 +2818,6 @@ bna_rx_dim_update(struct bna_ccb *ccb)
3165 2818
3166/* Tx */ 2819/* Tx */
3167/* TX <- bnad */ 2820/* TX <- bnad */
3168enum bna_cb_status
3169bna_tx_prio_set(struct bna_tx *tx, int prio,
3170 void (*cbfn)(struct bnad *, struct bna_tx *,
3171 enum bna_cb_status))
3172{
3173 if (tx->flags & BNA_TX_F_PRIO_LOCK)
3174 return BNA_CB_FAIL;
3175 else {
3176 tx->prio_change_cbfn = cbfn;
3177 bna_tx_prio_changed(tx, prio);
3178 }
3179
3180 return BNA_CB_SUCCESS;
3181}
3182
3183/* TX <- bnad */
3184void 2821void
3185bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) 2822bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3186{ 2823{
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
index 67eb376c5c7e..806b224a4c63 100644
--- a/drivers/net/bna/bna_hw.h
+++ b/drivers/net/bna/bna_hw.h
@@ -1282,7 +1282,6 @@ struct bna_chip_regs_offset {
1282 u32 fn_int_mask; 1282 u32 fn_int_mask;
1283 u32 msix_idx; 1283 u32 msix_idx;
1284}; 1284};
1285extern const struct bna_chip_regs_offset reg_offset[];
1286 1285
1287struct bna_chip_regs { 1286struct bna_chip_regs {
1288 void __iomem *page_addr; 1287 void __iomem *page_addr;
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index 890846d55502..ad93fdb0f427 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -195,7 +195,7 @@ bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
195 ib_mod->bna = NULL; 195 ib_mod->bna = NULL;
196} 196}
197 197
198struct bna_ib * 198static struct bna_ib *
199bna_ib_get(struct bna_ib_mod *ib_mod, 199bna_ib_get(struct bna_ib_mod *ib_mod,
200 enum bna_intr_type intr_type, 200 enum bna_intr_type intr_type,
201 int vector) 201 int vector)
@@ -240,7 +240,7 @@ bna_ib_get(struct bna_ib_mod *ib_mod,
240 return ib; 240 return ib;
241} 241}
242 242
243void 243static void
244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib) 244bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
245{ 245{
246 bna_intr_put(ib_mod, ib->intr); 246 bna_intr_put(ib_mod, ib->intr);
@@ -255,7 +255,7 @@ bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
255} 255}
256 256
257/* Returns index offset - starting from 0 */ 257/* Returns index offset - starting from 0 */
258int 258static int
259bna_ib_reserve_idx(struct bna_ib *ib) 259bna_ib_reserve_idx(struct bna_ib *ib)
260{ 260{
261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod; 261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -309,7 +309,7 @@ bna_ib_reserve_idx(struct bna_ib *ib)
309 return idx; 309 return idx;
310} 310}
311 311
312void 312static void
313bna_ib_release_idx(struct bna_ib *ib, int idx) 313bna_ib_release_idx(struct bna_ib *ib, int idx)
314{ 314{
315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod; 315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -356,7 +356,7 @@ bna_ib_release_idx(struct bna_ib *ib, int idx)
356 } 356 }
357} 357}
358 358
359int 359static int
360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config) 360bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
361{ 361{
362 if (ib->start_count) 362 if (ib->start_count)
@@ -374,7 +374,7 @@ bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
374 return 0; 374 return 0;
375} 375}
376 376
377void 377static void
378bna_ib_start(struct bna_ib *ib) 378bna_ib_start(struct bna_ib *ib)
379{ 379{
380 struct bna_ib_blk_mem ib_cfg; 380 struct bna_ib_blk_mem ib_cfg;
@@ -450,7 +450,7 @@ bna_ib_start(struct bna_ib *ib)
450 } 450 }
451} 451}
452 452
453void 453static void
454bna_ib_stop(struct bna_ib *ib) 454bna_ib_stop(struct bna_ib *ib)
455{ 455{
456 u32 intx_mask; 456 u32 intx_mask;
@@ -468,7 +468,7 @@ bna_ib_stop(struct bna_ib *ib)
468 } 468 }
469} 469}
470 470
471void 471static void
472bna_ib_fail(struct bna_ib *ib) 472bna_ib_fail(struct bna_ib *ib)
473{ 473{
474 ib->start_count = 0; 474 ib->start_count = 0;
@@ -1394,7 +1394,7 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
1394 rxf_reset_packet_filter_allmulti(rxf); 1394 rxf_reset_packet_filter_allmulti(rxf);
1395} 1395}
1396 1396
1397void 1397static void
1398bna_rxf_init(struct bna_rxf *rxf, 1398bna_rxf_init(struct bna_rxf *rxf,
1399 struct bna_rx *rx, 1399 struct bna_rx *rx,
1400 struct bna_rx_config *q_config) 1400 struct bna_rx_config *q_config)
@@ -1444,7 +1444,7 @@ bna_rxf_init(struct bna_rxf *rxf,
1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); 1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1445} 1445}
1446 1446
1447void 1447static void
1448bna_rxf_uninit(struct bna_rxf *rxf) 1448bna_rxf_uninit(struct bna_rxf *rxf)
1449{ 1449{
1450 struct bna_mac *mac; 1450 struct bna_mac *mac;
@@ -1476,7 +1476,18 @@ bna_rxf_uninit(struct bna_rxf *rxf)
1476 rxf->rx = NULL; 1476 rxf->rx = NULL;
1477} 1477}
1478 1478
1479void 1479static void
1480bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
1481{
1482 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
1483 if (rx->rxf.rxf_id < 32)
1484 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
1485 else
1486 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
1487 1 << (rx->rxf.rxf_id - 32));
1488}
1489
1490static void
1480bna_rxf_start(struct bna_rxf *rxf) 1491bna_rxf_start(struct bna_rxf *rxf)
1481{ 1492{
1482 rxf->start_cbfn = bna_rx_cb_rxf_started; 1493 rxf->start_cbfn = bna_rx_cb_rxf_started;
@@ -1485,7 +1496,18 @@ bna_rxf_start(struct bna_rxf *rxf)
1485 bfa_fsm_send_event(rxf, RXF_E_START); 1496 bfa_fsm_send_event(rxf, RXF_E_START);
1486} 1497}
1487 1498
1488void 1499static void
1500bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
1501{
1502 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
1503 if (rx->rxf.rxf_id < 32)
1504 rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
1505 else
1506 rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
1507 1 << (rx->rxf.rxf_id - 32);
1508}
1509
1510static void
1489bna_rxf_stop(struct bna_rxf *rxf) 1511bna_rxf_stop(struct bna_rxf *rxf)
1490{ 1512{
1491 rxf->stop_cbfn = bna_rx_cb_rxf_stopped; 1513 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
@@ -1493,7 +1515,7 @@ bna_rxf_stop(struct bna_rxf *rxf)
1493 bfa_fsm_send_event(rxf, RXF_E_STOP); 1515 bfa_fsm_send_event(rxf, RXF_E_STOP);
1494} 1516}
1495 1517
1496void 1518static void
1497bna_rxf_fail(struct bna_rxf *rxf) 1519bna_rxf_fail(struct bna_rxf *rxf)
1498{ 1520{
1499 rxf->rxf_flags |= BNA_RXF_FL_FAILED; 1521 rxf->rxf_flags |= BNA_RXF_FL_FAILED;
@@ -1576,43 +1598,6 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1576} 1598}
1577 1599
1578enum bna_cb_status 1600enum bna_cb_status
1579bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
1580 void (*cbfn)(struct bnad *, struct bna_rx *,
1581 enum bna_cb_status))
1582{
1583 struct bna_rxf *rxf = &rx->rxf;
1584 struct list_head *qe;
1585 struct bna_mac *mac;
1586
1587 list_for_each(qe, &rxf->mcast_pending_add_q) {
1588 mac = (struct bna_mac *)qe;
1589 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1590 list_del(qe);
1591 bfa_q_qe_init(qe);
1592 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1593 if (cbfn)
1594 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1595 return BNA_CB_SUCCESS;
1596 }
1597 }
1598
1599 list_for_each(qe, &rxf->mcast_active_q) {
1600 mac = (struct bna_mac *)qe;
1601 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1602 list_del(qe);
1603 bfa_q_qe_init(qe);
1604 list_add_tail(qe, &rxf->mcast_pending_del_q);
1605 rxf->cam_fltr_cbfn = cbfn;
1606 rxf->cam_fltr_cbarg = rx->bna->bnad;
1607 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1608 return BNA_CB_SUCCESS;
1609 }
1610 }
1611
1612 return BNA_CB_INVALID_MAC;
1613}
1614
1615enum bna_cb_status
1616bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist, 1601bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1617 void (*cbfn)(struct bnad *, struct bna_rx *, 1602 void (*cbfn)(struct bnad *, struct bna_rx *,
1618 enum bna_cb_status)) 1603 enum bna_cb_status))
@@ -1862,7 +1847,7 @@ bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1862bfa_fsm_state_decl(bna_rx, rxq_stop_wait, 1847bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1863 struct bna_rx, enum bna_rx_event); 1848 struct bna_rx, enum bna_rx_event);
1864 1849
1865static struct bfa_sm_table rx_sm_table[] = { 1850static const struct bfa_sm_table rx_sm_table[] = {
1866 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED}, 1851 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1867 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT}, 1852 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1868 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED}, 1853 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
@@ -2247,7 +2232,7 @@ bna_rit_create(struct bna_rx *rx)
2247 } 2232 }
2248} 2233}
2249 2234
2250int 2235static int
2251_rx_can_satisfy(struct bna_rx_mod *rx_mod, 2236_rx_can_satisfy(struct bna_rx_mod *rx_mod,
2252 struct bna_rx_config *rx_cfg) 2237 struct bna_rx_config *rx_cfg)
2253{ 2238{
@@ -2272,7 +2257,7 @@ _rx_can_satisfy(struct bna_rx_mod *rx_mod,
2272 return 1; 2257 return 1;
2273} 2258}
2274 2259
2275struct bna_rxq * 2260static struct bna_rxq *
2276_get_free_rxq(struct bna_rx_mod *rx_mod) 2261_get_free_rxq(struct bna_rx_mod *rx_mod)
2277{ 2262{
2278 struct bna_rxq *rxq = NULL; 2263 struct bna_rxq *rxq = NULL;
@@ -2286,7 +2271,7 @@ _get_free_rxq(struct bna_rx_mod *rx_mod)
2286 return rxq; 2271 return rxq;
2287} 2272}
2288 2273
2289void 2274static void
2290_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) 2275_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2291{ 2276{
2292 bfa_q_qe_init(&rxq->qe); 2277 bfa_q_qe_init(&rxq->qe);
@@ -2294,7 +2279,7 @@ _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2294 rx_mod->rxq_free_count++; 2279 rx_mod->rxq_free_count++;
2295} 2280}
2296 2281
2297struct bna_rxp * 2282static struct bna_rxp *
2298_get_free_rxp(struct bna_rx_mod *rx_mod) 2283_get_free_rxp(struct bna_rx_mod *rx_mod)
2299{ 2284{
2300 struct list_head *qe = NULL; 2285 struct list_head *qe = NULL;
@@ -2310,7 +2295,7 @@ _get_free_rxp(struct bna_rx_mod *rx_mod)
2310 return rxp; 2295 return rxp;
2311} 2296}
2312 2297
2313void 2298static void
2314_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) 2299_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2315{ 2300{
2316 bfa_q_qe_init(&rxp->qe); 2301 bfa_q_qe_init(&rxp->qe);
@@ -2318,7 +2303,7 @@ _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2318 rx_mod->rxp_free_count++; 2303 rx_mod->rxp_free_count++;
2319} 2304}
2320 2305
2321struct bna_rx * 2306static struct bna_rx *
2322_get_free_rx(struct bna_rx_mod *rx_mod) 2307_get_free_rx(struct bna_rx_mod *rx_mod)
2323{ 2308{
2324 struct list_head *qe = NULL; 2309 struct list_head *qe = NULL;
@@ -2336,7 +2321,7 @@ _get_free_rx(struct bna_rx_mod *rx_mod)
2336 return rx; 2321 return rx;
2337} 2322}
2338 2323
2339void 2324static void
2340_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx) 2325_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2341{ 2326{
2342 bfa_q_qe_init(&rx->qe); 2327 bfa_q_qe_init(&rx->qe);
@@ -2344,7 +2329,7 @@ _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2344 rx_mod->rx_free_count++; 2329 rx_mod->rx_free_count++;
2345} 2330}
2346 2331
2347void 2332static void
2348_rx_init(struct bna_rx *rx, struct bna *bna) 2333_rx_init(struct bna_rx *rx, struct bna *bna)
2349{ 2334{
2350 rx->bna = bna; 2335 rx->bna = bna;
@@ -2360,7 +2345,7 @@ _rx_init(struct bna_rx *rx, struct bna *bna)
2360 rx->stop_cbarg = NULL; 2345 rx->stop_cbarg = NULL;
2361} 2346}
2362 2347
2363void 2348static void
2364_rxp_add_rxqs(struct bna_rxp *rxp, 2349_rxp_add_rxqs(struct bna_rxp *rxp,
2365 struct bna_rxq *q0, 2350 struct bna_rxq *q0,
2366 struct bna_rxq *q1) 2351 struct bna_rxq *q1)
@@ -2383,7 +2368,7 @@ _rxp_add_rxqs(struct bna_rxp *rxp,
2383 } 2368 }
2384} 2369}
2385 2370
2386void 2371static void
2387_rxq_qpt_init(struct bna_rxq *rxq, 2372_rxq_qpt_init(struct bna_rxq *rxq,
2388 struct bna_rxp *rxp, 2373 struct bna_rxp *rxp,
2389 u32 page_count, 2374 u32 page_count,
@@ -2412,7 +2397,7 @@ _rxq_qpt_init(struct bna_rxq *rxq,
2412 } 2397 }
2413} 2398}
2414 2399
2415void 2400static void
2416_rxp_cqpt_setup(struct bna_rxp *rxp, 2401_rxp_cqpt_setup(struct bna_rxp *rxp,
2417 u32 page_count, 2402 u32 page_count,
2418 u32 page_size, 2403 u32 page_size,
@@ -2441,13 +2426,13 @@ _rxp_cqpt_setup(struct bna_rxp *rxp,
2441 } 2426 }
2442} 2427}
2443 2428
2444void 2429static void
2445_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp) 2430_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2446{ 2431{
2447 list_add_tail(&rxp->qe, &rx->rxp_q); 2432 list_add_tail(&rxp->qe, &rx->rxp_q);
2448} 2433}
2449 2434
2450void 2435static void
2451_init_rxmod_queues(struct bna_rx_mod *rx_mod) 2436_init_rxmod_queues(struct bna_rx_mod *rx_mod)
2452{ 2437{
2453 INIT_LIST_HEAD(&rx_mod->rx_free_q); 2438 INIT_LIST_HEAD(&rx_mod->rx_free_q);
@@ -2460,7 +2445,7 @@ _init_rxmod_queues(struct bna_rx_mod *rx_mod)
2460 rx_mod->rxp_free_count = 0; 2445 rx_mod->rxp_free_count = 0;
2461} 2446}
2462 2447
2463void 2448static void
2464_rx_ctor(struct bna_rx *rx, int id) 2449_rx_ctor(struct bna_rx *rx, int id)
2465{ 2450{
2466 bfa_q_qe_init(&rx->qe); 2451 bfa_q_qe_init(&rx->qe);
@@ -2492,7 +2477,7 @@ bna_rx_cb_rxq_stopped_all(void *arg)
2492 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED); 2477 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2493} 2478}
2494 2479
2495void 2480static void
2496bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx, 2481bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2497 enum bna_cb_status status) 2482 enum bna_cb_status status)
2498{ 2483{
@@ -2501,7 +2486,7 @@ bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2501 bfa_wc_down(&rx_mod->rx_stop_wc); 2486 bfa_wc_down(&rx_mod->rx_stop_wc);
2502} 2487}
2503 2488
2504void 2489static void
2505bna_rx_mod_cb_rx_stopped_all(void *arg) 2490bna_rx_mod_cb_rx_stopped_all(void *arg)
2506{ 2491{
2507 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; 2492 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
@@ -2511,7 +2496,7 @@ bna_rx_mod_cb_rx_stopped_all(void *arg)
2511 rx_mod->stop_cbfn = NULL; 2496 rx_mod->stop_cbfn = NULL;
2512} 2497}
2513 2498
2514void 2499static void
2515bna_rx_start(struct bna_rx *rx) 2500bna_rx_start(struct bna_rx *rx)
2516{ 2501{
2517 rx->rx_flags |= BNA_RX_F_PORT_ENABLED; 2502 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
@@ -2519,7 +2504,7 @@ bna_rx_start(struct bna_rx *rx)
2519 bfa_fsm_send_event(rx, RX_E_START); 2504 bfa_fsm_send_event(rx, RX_E_START);
2520} 2505}
2521 2506
2522void 2507static void
2523bna_rx_stop(struct bna_rx *rx) 2508bna_rx_stop(struct bna_rx *rx)
2524{ 2509{
2525 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED; 2510 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
@@ -2532,7 +2517,7 @@ bna_rx_stop(struct bna_rx *rx)
2532 } 2517 }
2533} 2518}
2534 2519
2535void 2520static void
2536bna_rx_fail(struct bna_rx *rx) 2521bna_rx_fail(struct bna_rx *rx)
2537{ 2522{
2538 /* Indicate port is not enabled, and failed */ 2523 /* Indicate port is not enabled, and failed */
@@ -2542,28 +2527,6 @@ bna_rx_fail(struct bna_rx *rx)
2542} 2527}
2543 2528
2544void 2529void
2545bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
2546{
2547 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
2548 if (rx->rxf.rxf_id < 32)
2549 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
2550 else
2551 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
2552 1 << (rx->rxf.rxf_id - 32));
2553}
2554
2555void
2556bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
2557{
2558 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
2559 if (rx->rxf.rxf_id < 32)
2560 rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
2561 else
2562 rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
2563 1 << (rx->rxf.rxf_id - 32);
2564}
2565
2566void
2567bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) 2530bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2568{ 2531{
2569 struct bna_rx *rx; 2532 struct bna_rx *rx;
@@ -3731,7 +3694,7 @@ bna_tx_fail(struct bna_tx *tx)
3731 bfa_fsm_send_event(tx, TX_E_FAIL); 3694 bfa_fsm_send_event(tx, TX_E_FAIL);
3732} 3695}
3733 3696
3734void 3697static void
3735bna_tx_prio_changed(struct bna_tx *tx, int prio) 3698bna_tx_prio_changed(struct bna_tx *tx, int prio)
3736{ 3699{
3737 struct bna_txq *txq; 3700 struct bna_txq *txq;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index e380c0e88f4f..7e839b9cec22 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -28,7 +28,7 @@
28#include "bna.h" 28#include "bna.h"
29#include "cna.h" 29#include "cna.h"
30 30
31DEFINE_MUTEX(bnad_fwimg_mutex); 31static DEFINE_MUTEX(bnad_fwimg_mutex);
32 32
33/* 33/*
34 * Module params 34 * Module params
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46 */ 46 */
47u32 bnad_rxqs_per_cq = 2; 47u32 bnad_rxqs_per_cq = 2;
48 48
49const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 49static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
50 50
51/* 51/*
52 * Local MACROS 52 * Local MACROS
@@ -564,9 +564,11 @@ bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
564static void 564static void
565bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) 565bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
566{ 566{
567 spin_lock_irq(&bnad->bna_lock); /* Because of polling context */ 567 unsigned long flags;
568
569 spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
568 bnad_enable_rx_irq_unsafe(ccb); 570 bnad_enable_rx_irq_unsafe(ccb);
569 spin_unlock_irq(&bnad->bna_lock); 571 spin_unlock_irqrestore(&bnad->bna_lock, flags);
570} 572}
571 573
572static void 574static void
@@ -599,7 +601,7 @@ static irqreturn_t
599bnad_msix_mbox_handler(int irq, void *data) 601bnad_msix_mbox_handler(int irq, void *data)
600{ 602{
601 u32 intr_status; 603 u32 intr_status;
602 unsigned long flags; 604 unsigned long flags;
603 struct net_device *netdev = data; 605 struct net_device *netdev = data;
604 struct bnad *bnad; 606 struct bnad *bnad;
605 607
@@ -630,13 +632,15 @@ bnad_isr(int irq, void *data)
630 struct bnad_rx_info *rx_info; 632 struct bnad_rx_info *rx_info;
631 struct bnad_rx_ctrl *rx_ctrl; 633 struct bnad_rx_ctrl *rx_ctrl;
632 634
633 spin_lock_irqsave(&bnad->bna_lock, flags); 635 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
636 return IRQ_NONE;
634 637
635 bna_intr_status_get(&bnad->bna, intr_status); 638 bna_intr_status_get(&bnad->bna, intr_status);
636 if (!intr_status) { 639
637 spin_unlock_irqrestore(&bnad->bna_lock, flags); 640 if (unlikely(!intr_status))
638 return IRQ_NONE; 641 return IRQ_NONE;
639 } 642
643 spin_lock_irqsave(&bnad->bna_lock, flags);
640 644
641 if (BNA_IS_MBOX_ERR_INTR(intr_status)) { 645 if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
642 bna_mbox_handler(&bnad->bna, intr_status); 646 bna_mbox_handler(&bnad->bna, intr_status);
@@ -672,11 +676,10 @@ bnad_enable_mbox_irq(struct bnad *bnad)
672{ 676{
673 int irq = BNAD_GET_MBOX_IRQ(bnad); 677 int irq = BNAD_GET_MBOX_IRQ(bnad);
674 678
675 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
676 return;
677
678 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) 679 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
679 enable_irq(irq); 680 if (bnad->cfg_flags & BNAD_CF_MSIX)
681 enable_irq(irq);
682
680 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); 683 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
681} 684}
682 685
@@ -684,16 +687,16 @@ bnad_enable_mbox_irq(struct bnad *bnad)
684 * Called with bnad->bna_lock held b'cos of 687 * Called with bnad->bna_lock held b'cos of
685 * bnad->cfg_flags access. 688 * bnad->cfg_flags access.
686 */ 689 */
687void 690static void
688bnad_disable_mbox_irq(struct bnad *bnad) 691bnad_disable_mbox_irq(struct bnad *bnad)
689{ 692{
690 int irq = BNAD_GET_MBOX_IRQ(bnad); 693 int irq = BNAD_GET_MBOX_IRQ(bnad);
691 694
692 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
693 return;
694 695
695 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) 696 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
696 disable_irq_nosync(irq); 697 if (bnad->cfg_flags & BNAD_CF_MSIX)
698 disable_irq_nosync(irq);
699
697 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); 700 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
698} 701}
699 702
@@ -953,11 +956,6 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
953 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 956 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
954} 957}
955 958
956void
957bnad_cb_stats_clr(struct bnad *bnad)
958{
959}
960
961/* Resource allocation, free functions */ 959/* Resource allocation, free functions */
962 960
963static void 961static void
@@ -1045,14 +1043,12 @@ bnad_mbox_irq_free(struct bnad *bnad,
1045 return; 1043 return;
1046 1044
1047 spin_lock_irqsave(&bnad->bna_lock, flags); 1045 spin_lock_irqsave(&bnad->bna_lock, flags);
1048
1049 bnad_disable_mbox_irq(bnad); 1046 bnad_disable_mbox_irq(bnad);
1047 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1050 1048
1051 irq = BNAD_GET_MBOX_IRQ(bnad); 1049 irq = BNAD_GET_MBOX_IRQ(bnad);
1052 free_irq(irq, bnad->netdev); 1050 free_irq(irq, bnad->netdev);
1053 1051
1054 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1055
1056 kfree(intr_info->idl); 1052 kfree(intr_info->idl);
1057} 1053}
1058 1054
@@ -1094,8 +1090,15 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1094 1090
1095 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); 1091 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1096 1092
1093 /*
1094 * Set the Mbox IRQ disable flag, so that the IRQ handler
1095 * called from request_irq() for SHARED IRQs do not execute
1096 */
1097 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1098
1097 err = request_irq(irq, irq_handler, flags, 1099 err = request_irq(irq, irq_handler, flags,
1098 bnad->mbox_irq_name, bnad->netdev); 1100 bnad->mbox_irq_name, bnad->netdev);
1101
1099 if (err) { 1102 if (err) {
1100 kfree(intr_info->idl); 1103 kfree(intr_info->idl);
1101 intr_info->idl = NULL; 1104 intr_info->idl = NULL;
@@ -1103,7 +1106,10 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1103 } 1106 }
1104 1107
1105 spin_lock_irqsave(&bnad->bna_lock, flags); 1108 spin_lock_irqsave(&bnad->bna_lock, flags);
1106 bnad_disable_mbox_irq(bnad); 1109
1110 if (bnad->cfg_flags & BNAD_CF_MSIX)
1111 disable_irq_nosync(irq);
1112
1107 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1108 return 0; 1114 return 0;
1109} 1115}
@@ -1485,7 +1491,6 @@ bnad_stats_timer_start(struct bnad *bnad)
1485 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 1491 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1486 } 1492 }
1487 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1493 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1488
1489} 1494}
1490 1495
1491/* 1496/*
@@ -2170,7 +2175,6 @@ bnad_device_disable(struct bnad *bnad)
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2175 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171 2176
2172 wait_for_completion(&bnad->bnad_completions.ioc_comp); 2177 wait_for_completion(&bnad->bnad_completions.ioc_comp);
2173
2174} 2178}
2175 2179
2176static int 2180static int
@@ -2236,7 +2240,6 @@ static void
2236bnad_enable_msix(struct bnad *bnad) 2240bnad_enable_msix(struct bnad *bnad)
2237{ 2241{
2238 int i, ret; 2242 int i, ret;
2239 u32 tot_msix_num;
2240 unsigned long flags; 2243 unsigned long flags;
2241 2244
2242 spin_lock_irqsave(&bnad->bna_lock, flags); 2245 spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -2249,18 +2252,16 @@ bnad_enable_msix(struct bnad *bnad)
2249 if (bnad->msix_table) 2252 if (bnad->msix_table)
2250 return; 2253 return;
2251 2254
2252 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2253
2254 bnad->msix_table = 2255 bnad->msix_table =
2255 kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL); 2256 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2256 2257
2257 if (!bnad->msix_table) 2258 if (!bnad->msix_table)
2258 goto intx_mode; 2259 goto intx_mode;
2259 2260
2260 for (i = 0; i < tot_msix_num; i++) 2261 for (i = 0; i < bnad->msix_num; i++)
2261 bnad->msix_table[i].entry = i; 2262 bnad->msix_table[i].entry = i;
2262 2263
2263 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num); 2264 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
2264 if (ret > 0) { 2265 if (ret > 0) {
2265 /* Not enough MSI-X vectors. */ 2266 /* Not enough MSI-X vectors. */
2266 2267
@@ -2273,12 +2274,11 @@ bnad_enable_msix(struct bnad *bnad)
2273 + (bnad->num_rx 2274 + (bnad->num_rx
2274 * bnad->num_rxp_per_rx) + 2275 * bnad->num_rxp_per_rx) +
2275 BNAD_MAILBOX_MSIX_VECTORS; 2276 BNAD_MAILBOX_MSIX_VECTORS;
2276 tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
2277 2277
2278 /* Try once more with adjusted numbers */ 2278 /* Try once more with adjusted numbers */
2279 /* If this fails, fall back to INTx */ 2279 /* If this fails, fall back to INTx */
2280 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, 2280 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
2281 tot_msix_num); 2281 bnad->msix_num);
2282 if (ret) 2282 if (ret)
2283 goto intx_mode; 2283 goto intx_mode;
2284 2284
@@ -2291,7 +2291,6 @@ intx_mode:
2291 kfree(bnad->msix_table); 2291 kfree(bnad->msix_table);
2292 bnad->msix_table = NULL; 2292 bnad->msix_table = NULL;
2293 bnad->msix_num = 0; 2293 bnad->msix_num = 0;
2294 bnad->msix_diag_num = 0;
2295 spin_lock_irqsave(&bnad->bna_lock, flags); 2294 spin_lock_irqsave(&bnad->bna_lock, flags);
2296 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2295 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2297 bnad_q_num_init(bnad); 2296 bnad_q_num_init(bnad);
@@ -2502,7 +2501,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2502 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO : 2501 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO :
2503 BNA_TXQ_WI_SEND)); 2502 BNA_TXQ_WI_SEND));
2504 2503
2505 if (bnad->vlan_grp && vlan_tx_tag_present(skb)) { 2504 if (vlan_tx_tag_present(skb)) {
2506 vlan_tag = (u16) vlan_tx_tag_get(skb); 2505 vlan_tag = (u16) vlan_tx_tag_get(skb);
2507 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2506 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2508 } 2507 }
@@ -2939,7 +2938,6 @@ bnad_init(struct bnad *bnad,
2939 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + 2938 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
2940 (bnad->num_rx * bnad->num_rxp_per_rx) + 2939 (bnad->num_rx * bnad->num_rxp_per_rx) +
2941 BNAD_MAILBOX_MSIX_VECTORS; 2940 BNAD_MAILBOX_MSIX_VECTORS;
2942 bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */
2943 2941
2944 bnad->txq_depth = BNAD_TXQ_DEPTH; 2942 bnad->txq_depth = BNAD_TXQ_DEPTH;
2945 bnad->rxq_depth = BNAD_RXQ_DEPTH; 2943 bnad->rxq_depth = BNAD_RXQ_DEPTH;
@@ -3108,7 +3106,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3108 3106
3109 spin_lock_irqsave(&bnad->bna_lock, flags); 3107 spin_lock_irqsave(&bnad->bna_lock, flags);
3110 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); 3108 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3111
3112 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3109 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3113 3110
3114 bnad->stats.bna_stats = &bna->stats; 3111 bnad->stats.bna_stats = &bna->stats;
@@ -3211,7 +3208,7 @@ bnad_pci_remove(struct pci_dev *pdev)
3211 free_netdev(netdev); 3208 free_netdev(netdev);
3212} 3209}
3213 3210
3214const struct pci_device_id bnad_pci_id_table[] = { 3211static const struct pci_device_id bnad_pci_id_table[] = {
3215 { 3212 {
3216 PCI_DEVICE(PCI_VENDOR_ID_BROCADE, 3213 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3217 PCI_DEVICE_ID_BROCADE_CT), 3214 PCI_DEVICE_ID_BROCADE_CT),
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ee377888b905..ebc3a9078642 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -248,7 +248,6 @@ struct bnad {
248 u64 mmio_len; 248 u64 mmio_len;
249 249
250 u32 msix_num; 250 u32 msix_num;
251 u32 msix_diag_num;
252 struct msix_entry *msix_table; 251 struct msix_entry *msix_table;
253 252
254 struct mutex conf_mutex; 253 struct mutex conf_mutex;
diff --git a/drivers/net/bna/cna_fwimg.c b/drivers/net/bna/cna_fwimg.c
index 0bd1d3790a27..e8f4ecd9ebb5 100644
--- a/drivers/net/bna/cna_fwimg.c
+++ b/drivers/net/bna/cna_fwimg.c
@@ -22,7 +22,7 @@ const struct firmware *bfi_fw;
22static u32 *bfi_image_ct_cna; 22static u32 *bfi_image_ct_cna;
23static u32 bfi_image_ct_cna_size; 23static u32 bfi_image_ct_cna_size;
24 24
25u32 * 25static u32 *
26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 26cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
27 u32 *bfi_image_size, char *fw_name) 27 u32 *bfi_image_size, char *fw_name)
28{ 28{
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4ff76e38e788..bf3c830e7dda 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -37,9 +37,6 @@
37#include <linux/ethtool.h> 37#include <linux/ethtool.h>
38#include <linux/mii.h> 38#include <linux/mii.h>
39#include <linux/if_vlan.h> 39#include <linux/if_vlan.h>
40#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41#define BCM_VLAN 1
42#endif
43#include <net/ip.h> 40#include <net/ip.h>
44#include <net/tcp.h> 41#include <net/tcp.h>
45#include <net/checksum.h> 42#include <net/checksum.h>
@@ -59,13 +56,13 @@
59#include "bnx2_fw.h" 56#include "bnx2_fw.h"
60 57
61#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
62#define DRV_MODULE_VERSION "2.0.17" 59#define DRV_MODULE_VERSION "2.0.18"
63#define DRV_MODULE_RELDATE "July 18, 2010" 60#define DRV_MODULE_RELDATE "Oct 7, 2010"
64#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
65#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
66#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw" 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
67#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw" 64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
68#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw" 65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
69 66
70#define RUN_AT(x) (jiffies + (x)) 67#define RUN_AT(x) (jiffies + (x))
71 68
@@ -266,7 +263,7 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
266 if (diff == TX_DESC_CNT) 263 if (diff == TX_DESC_CNT)
267 diff = MAX_TX_DESC_CNT; 264 diff = MAX_TX_DESC_CNT;
268 } 265 }
269 return (bp->tx_ring_size - diff); 266 return bp->tx_ring_size - diff;
270} 267}
271 268
272static u32 269static u32
@@ -299,7 +296,7 @@ bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
299static u32 296static u32
300bnx2_shmem_rd(struct bnx2 *bp, u32 offset) 297bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
301{ 298{
302 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset)); 299 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
303} 300}
304 301
305static void 302static void
@@ -977,9 +974,9 @@ bnx2_report_fw_link(struct bnx2 *bp)
977static char * 974static char *
978bnx2_xceiver_str(struct bnx2 *bp) 975bnx2_xceiver_str(struct bnx2 *bp)
979{ 976{
980 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" : 977 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
981 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" : 978 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
982 "Copper")); 979 "Copper");
983} 980}
984 981
985static void 982static void
@@ -1269,30 +1266,9 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1269 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; 1266 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1270 val |= 0x02 << 8; 1267 val |= 0x02 << 8;
1271 1268
1272 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 1269 if (bp->flow_ctrl & FLOW_CTRL_TX)
1273 u32 lo_water, hi_water; 1270 val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1274
1275 if (bp->flow_ctrl & FLOW_CTRL_TX)
1276 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1277 else
1278 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1279 if (lo_water >= bp->rx_ring_size)
1280 lo_water = 0;
1281
1282 hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
1283
1284 if (hi_water <= lo_water)
1285 lo_water = 0;
1286
1287 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1288 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1289 1271
1290 if (hi_water > 0xf)
1291 hi_water = 0xf;
1292 else if (hi_water == 0)
1293 lo_water = 0;
1294 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1295 }
1296 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val); 1272 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1297} 1273}
1298 1274
@@ -1373,8 +1349,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
1373 /* Acknowledge the interrupt. */ 1349 /* Acknowledge the interrupt. */
1374 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); 1350 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1375 1351
1376 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1352 bnx2_init_all_rx_contexts(bp);
1377 bnx2_init_all_rx_contexts(bp);
1378} 1353}
1379 1354
1380static void 1355static void
@@ -1758,7 +1733,7 @@ __acquires(&bp->phy_lock)
1758 u32 new_adv = 0; 1733 u32 new_adv = 0;
1759 1734
1760 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) 1735 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1761 return (bnx2_setup_remote_phy(bp, port)); 1736 return bnx2_setup_remote_phy(bp, port);
1762 1737
1763 if (!(bp->autoneg & AUTONEG_SPEED)) { 1738 if (!(bp->autoneg & AUTONEG_SPEED)) {
1764 u32 new_bmcr; 1739 u32 new_bmcr;
@@ -2171,10 +2146,10 @@ __acquires(&bp->phy_lock)
2171 return 0; 2146 return 0;
2172 2147
2173 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 2148 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2174 return (bnx2_setup_serdes_phy(bp, port)); 2149 return bnx2_setup_serdes_phy(bp, port);
2175 } 2150 }
2176 else { 2151 else {
2177 return (bnx2_setup_copper_phy(bp)); 2152 return bnx2_setup_copper_phy(bp);
2178 } 2153 }
2179} 2154}
2180 2155
@@ -3109,8 +3084,6 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3109 struct sw_bd *rx_buf, *next_rx_buf; 3084 struct sw_bd *rx_buf, *next_rx_buf;
3110 struct sk_buff *skb; 3085 struct sk_buff *skb;
3111 dma_addr_t dma_addr; 3086 dma_addr_t dma_addr;
3112 u16 vtag = 0;
3113 int hw_vlan __maybe_unused = 0;
3114 3087
3115 sw_ring_cons = RX_RING_IDX(sw_cons); 3088 sw_ring_cons = RX_RING_IDX(sw_cons);
3116 sw_ring_prod = RX_RING_IDX(sw_prod); 3089 sw_ring_prod = RX_RING_IDX(sw_prod);
@@ -3190,23 +3163,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3190 goto next_rx; 3163 goto next_rx;
3191 3164
3192 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && 3165 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3193 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) { 3166 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3194 vtag = rx_hdr->l2_fhdr_vlan_tag; 3167 __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3195#ifdef BCM_VLAN
3196 if (bp->vlgrp)
3197 hw_vlan = 1;
3198 else
3199#endif
3200 {
3201 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3202 __skb_push(skb, 4);
3203
3204 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3205 ve->h_vlan_proto = htons(ETH_P_8021Q);
3206 ve->h_vlan_TCI = htons(vtag);
3207 len += 4;
3208 }
3209 }
3210 3168
3211 skb->protocol = eth_type_trans(skb, bp->dev); 3169 skb->protocol = eth_type_trans(skb, bp->dev);
3212 3170
@@ -3233,14 +3191,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3233 skb->rxhash = rx_hdr->l2_fhdr_hash; 3191 skb->rxhash = rx_hdr->l2_fhdr_hash;
3234 3192
3235 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); 3193 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3236 3194 napi_gro_receive(&bnapi->napi, skb);
3237#ifdef BCM_VLAN
3238 if (hw_vlan)
3239 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3240 else
3241#endif
3242 napi_gro_receive(&bnapi->napi, skb);
3243
3244 rx_pkt++; 3195 rx_pkt++;
3245 3196
3246next_rx: 3197next_rx:
@@ -3555,13 +3506,9 @@ bnx2_set_rx_mode(struct net_device *dev)
3555 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS | 3506 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3556 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG); 3507 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3557 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN; 3508 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3558#ifdef BCM_VLAN 3509 if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3559 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) 3510 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3560 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3561#else
3562 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3563 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG; 3511 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3564#endif
3565 if (dev->flags & IFF_PROMISC) { 3512 if (dev->flags & IFF_PROMISC) {
3566 /* Promiscuous mode. */ 3513 /* Promiscuous mode. */
3567 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS; 3514 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
@@ -4974,6 +4921,11 @@ bnx2_init_chip(struct bnx2 *bp)
4974 4921
4975 REG_WR(bp, BNX2_HC_CONFIG, val); 4922 REG_WR(bp, BNX2_HC_CONFIG, val);
4976 4923
4924 if (bp->rx_ticks < 25)
4925 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4926 else
4927 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4928
4977 for (i = 1; i < bp->irq_nvecs; i++) { 4929 for (i = 1; i < bp->irq_nvecs; i++) {
4978 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + 4930 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4979 BNX2_HC_SB_CONFIG_1; 4931 BNX2_HC_SB_CONFIG_1;
@@ -5242,18 +5194,20 @@ bnx2_init_all_rings(struct bnx2 *bp)
5242 bnx2_init_rx_ring(bp, i); 5194 bnx2_init_rx_ring(bp, i);
5243 5195
5244 if (bp->num_rx_rings > 1) { 5196 if (bp->num_rx_rings > 1) {
5245 u32 tbl_32; 5197 u32 tbl_32 = 0;
5246 u8 *tbl = (u8 *) &tbl_32;
5247
5248 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5249 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5250 5198
5251 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) { 5199 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5252 tbl[i % 4] = i % (bp->num_rx_rings - 1); 5200 int shift = (i % 8) << 2;
5253 if ((i % 4) == 3) 5201
5254 bnx2_reg_wr_ind(bp, 5202 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5255 BNX2_RXP_SCRATCH_RSS_TBL + i, 5203 if ((i % 8) == 7) {
5256 cpu_to_be32(tbl_32)); 5204 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5205 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5206 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5207 BNX2_RLUP_RSS_COMMAND_WRITE |
5208 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5209 tbl_32 = 0;
5210 }
5257 } 5211 }
5258 5212
5259 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | 5213 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
@@ -6202,7 +6156,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6202 } 6156 }
6203} 6157}
6204 6158
6205static void 6159static int
6206bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) 6160bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6207{ 6161{
6208 int cpus = num_online_cpus(); 6162 int cpus = num_online_cpus();
@@ -6231,9 +6185,10 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6231 } 6185 }
6232 6186
6233 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs); 6187 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6234 bp->dev->real_num_tx_queues = bp->num_tx_rings; 6188 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6235 6189
6236 bp->num_rx_rings = bp->irq_nvecs; 6190 bp->num_rx_rings = bp->irq_nvecs;
6191 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6237} 6192}
6238 6193
6239/* Called with rtnl_lock */ 6194/* Called with rtnl_lock */
@@ -6248,7 +6203,9 @@ bnx2_open(struct net_device *dev)
6248 bnx2_set_power_state(bp, PCI_D0); 6203 bnx2_set_power_state(bp, PCI_D0);
6249 bnx2_disable_int(bp); 6204 bnx2_disable_int(bp);
6250 6205
6251 bnx2_setup_int_mode(bp, disable_msi); 6206 rc = bnx2_setup_int_mode(bp, disable_msi);
6207 if (rc)
6208 goto open_err;
6252 bnx2_init_napi(bp); 6209 bnx2_init_napi(bp);
6253 bnx2_napi_enable(bp); 6210 bnx2_napi_enable(bp);
6254 rc = bnx2_alloc_mem(bp); 6211 rc = bnx2_alloc_mem(bp);
@@ -6377,29 +6334,6 @@ bnx2_tx_timeout(struct net_device *dev)
6377 schedule_work(&bp->reset_task); 6334 schedule_work(&bp->reset_task);
6378} 6335}
6379 6336
6380#ifdef BCM_VLAN
6381/* Called with rtnl_lock */
6382static void
6383bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6384{
6385 struct bnx2 *bp = netdev_priv(dev);
6386
6387 if (netif_running(dev))
6388 bnx2_netif_stop(bp, false);
6389
6390 bp->vlgrp = vlgrp;
6391
6392 if (!netif_running(dev))
6393 return;
6394
6395 bnx2_set_rx_mode(dev);
6396 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6397 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6398
6399 bnx2_netif_start(bp, false);
6400}
6401#endif
6402
6403/* Called with netif_tx_lock. 6337/* Called with netif_tx_lock.
6404 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call 6338 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6405 * netif_wake_queue(). 6339 * netif_wake_queue().
@@ -6440,12 +6374,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6440 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; 6374 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6441 } 6375 }
6442 6376
6443#ifdef BCM_VLAN 6377 if (vlan_tx_tag_present(skb)) {
6444 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6445 vlan_tag_flags |= 6378 vlan_tag_flags |=
6446 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); 6379 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6447 } 6380 }
6448#endif 6381
6449 if ((mss = skb_shinfo(skb)->gso_size)) { 6382 if ((mss = skb_shinfo(skb)->gso_size)) {
6450 u32 tcp_opt_len; 6383 u32 tcp_opt_len;
6451 struct iphdr *iph; 6384 struct iphdr *iph;
@@ -7582,15 +7515,36 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
7582 struct bnx2 *bp = netdev_priv(dev); 7515 struct bnx2 *bp = netdev_priv(dev);
7583 7516
7584 if (CHIP_NUM(bp) == CHIP_NUM_5709) 7517 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7585 return (ethtool_op_set_tx_ipv6_csum(dev, data)); 7518 return ethtool_op_set_tx_ipv6_csum(dev, data);
7586 else 7519 else
7587 return (ethtool_op_set_tx_csum(dev, data)); 7520 return ethtool_op_set_tx_csum(dev, data);
7588} 7521}
7589 7522
7590static int 7523static int
7591bnx2_set_flags(struct net_device *dev, u32 data) 7524bnx2_set_flags(struct net_device *dev, u32 data)
7592{ 7525{
7593 return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH); 7526 struct bnx2 *bp = netdev_priv(dev);
7527 int rc;
7528
7529 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
7530 !(data & ETH_FLAG_RXVLAN))
7531 return -EOPNOTSUPP;
7532
7533 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
7534 ETH_FLAG_TXVLAN);
7535 if (rc)
7536 return rc;
7537
7538 if ((!!(data & ETH_FLAG_RXVLAN) !=
7539 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7540 netif_running(dev)) {
7541 bnx2_netif_stop(bp, false);
7542 bnx2_set_rx_mode(dev);
7543 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7544 bnx2_netif_start(bp, false);
7545 }
7546
7547 return 0;
7594} 7548}
7595 7549
7596static const struct ethtool_ops bnx2_ethtool_ops = { 7550static const struct ethtool_ops bnx2_ethtool_ops = {
@@ -7705,7 +7659,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
7705 return -EINVAL; 7659 return -EINVAL;
7706 7660
7707 dev->mtu = new_mtu; 7661 dev->mtu = new_mtu;
7708 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size)); 7662 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7709} 7663}
7710 7664
7711#ifdef CONFIG_NET_POLL_CONTROLLER 7665#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -7927,16 +7881,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7927 goto err_out_disable; 7881 goto err_out_disable;
7928 } 7882 }
7929 7883
7930 /* AER (Advanced Error Reporting) hooks */
7931 err = pci_enable_pcie_error_reporting(pdev);
7932 if (err) {
7933 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
7934 "0x%x\n", err);
7935 /* non-fatal, continue */
7936 }
7937
7938 pci_set_master(pdev); 7884 pci_set_master(pdev);
7939 pci_save_state(pdev);
7940 7885
7941 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 7886 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7942 if (bp->pm_cap == 0) { 7887 if (bp->pm_cap == 0) {
@@ -7991,6 +7936,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7991 bp->flags |= BNX2_FLAG_PCIE; 7936 bp->flags |= BNX2_FLAG_PCIE;
7992 if (CHIP_REV(bp) == CHIP_REV_Ax) 7937 if (CHIP_REV(bp) == CHIP_REV_Ax)
7993 bp->flags |= BNX2_FLAG_JUMBO_BROKEN; 7938 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7939
7940 /* AER (Advanced Error Reporting) hooks */
7941 err = pci_enable_pcie_error_reporting(pdev);
7942 if (err) {
7943 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
7944 "failed 0x%x\n", err);
7945 /* non-fatal, continue */
7946 }
7947
7994 } else { 7948 } else {
7995 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX); 7949 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7996 if (bp->pcix_cap == 0) { 7950 if (bp->pcix_cap == 0) {
@@ -8247,16 +8201,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8247 bp->timer.data = (unsigned long) bp; 8201 bp->timer.data = (unsigned long) bp;
8248 bp->timer.function = bnx2_timer; 8202 bp->timer.function = bnx2_timer;
8249 8203
8204 pci_save_state(pdev);
8205
8250 return 0; 8206 return 0;
8251 8207
8252err_out_unmap: 8208err_out_unmap:
8209 if (bp->flags & BNX2_FLAG_PCIE)
8210 pci_disable_pcie_error_reporting(pdev);
8211
8253 if (bp->regview) { 8212 if (bp->regview) {
8254 iounmap(bp->regview); 8213 iounmap(bp->regview);
8255 bp->regview = NULL; 8214 bp->regview = NULL;
8256 } 8215 }
8257 8216
8258err_out_release: 8217err_out_release:
8259 pci_disable_pcie_error_reporting(pdev);
8260 pci_release_regions(pdev); 8218 pci_release_regions(pdev);
8261 8219
8262err_out_disable: 8220err_out_disable:
@@ -8326,9 +8284,6 @@ static const struct net_device_ops bnx2_netdev_ops = {
8326 .ndo_set_mac_address = bnx2_change_mac_addr, 8284 .ndo_set_mac_address = bnx2_change_mac_addr,
8327 .ndo_change_mtu = bnx2_change_mtu, 8285 .ndo_change_mtu = bnx2_change_mtu,
8328 .ndo_tx_timeout = bnx2_tx_timeout, 8286 .ndo_tx_timeout = bnx2_tx_timeout,
8329#ifdef BCM_VLAN
8330 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8331#endif
8332#ifdef CONFIG_NET_POLL_CONTROLLER 8287#ifdef CONFIG_NET_POLL_CONTROLLER
8333 .ndo_poll_controller = poll_bnx2, 8288 .ndo_poll_controller = poll_bnx2,
8334#endif 8289#endif
@@ -8336,9 +8291,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8336 8291
8337static void inline vlan_features_add(struct net_device *dev, unsigned long flags) 8292static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
8338{ 8293{
8339#ifdef BCM_VLAN
8340 dev->vlan_features |= flags; 8294 dev->vlan_features |= flags;
8341#endif
8342} 8295}
8343 8296
8344static int __devinit 8297static int __devinit
@@ -8387,9 +8340,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8387 dev->features |= NETIF_F_IPV6_CSUM; 8340 dev->features |= NETIF_F_IPV6_CSUM;
8388 vlan_features_add(dev, NETIF_F_IPV6_CSUM); 8341 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8389 } 8342 }
8390#ifdef BCM_VLAN
8391 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8343 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8392#endif
8393 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 8344 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8394 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN); 8345 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8395 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8346 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
@@ -8446,9 +8397,10 @@ bnx2_remove_one(struct pci_dev *pdev)
8446 8397
8447 kfree(bp->temp_stats_blk); 8398 kfree(bp->temp_stats_blk);
8448 8399
8449 free_netdev(dev); 8400 if (bp->flags & BNX2_FLAG_PCIE)
8401 pci_disable_pcie_error_reporting(pdev);
8450 8402
8451 pci_disable_pcie_error_reporting(pdev); 8403 free_netdev(dev);
8452 8404
8453 pci_release_regions(pdev); 8405 pci_release_regions(pdev);
8454 pci_disable_device(pdev); 8406 pci_disable_device(pdev);
@@ -8562,6 +8514,9 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8562 } 8514 }
8563 rtnl_unlock(); 8515 rtnl_unlock();
8564 8516
8517 if (!(bp->flags & BNX2_FLAG_PCIE))
8518 return result;
8519
8565 err = pci_cleanup_aer_uncorrect_error_status(pdev); 8520 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8566 if (err) { 8521 if (err) {
8567 dev_err(&pdev->dev, 8522 dev_err(&pdev->dev,
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 2104c1005d02..bf4c3421067d 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -352,12 +352,7 @@ struct l2_fhdr {
352#define BNX2_L2CTX_BD_PRE_READ 0x00000000 352#define BNX2_L2CTX_BD_PRE_READ 0x00000000
353#define BNX2_L2CTX_CTX_SIZE 0x00000000 353#define BNX2_L2CTX_CTX_SIZE 0x00000000
354#define BNX2_L2CTX_CTX_TYPE 0x00000000 354#define BNX2_L2CTX_CTX_TYPE 0x00000000
355#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 4 355#define BNX2_L2CTX_FLOW_CTRL_ENABLE 0x000000ff
356#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4
357#define BNX2_L2CTX_LO_WATER_MARK_DIS 0
358#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4
359#define BNX2_L2CTX_HI_WATER_MARK_SCALE 16
360#define BNX2_L2CTX_WATER_MARKS_MSK 0x000000ff
361#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16) 356#define BNX2_L2CTX_CTX_TYPE_SIZE_L2 ((0x20/20)<<16)
362#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28) 357#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE (0xf<<28)
363#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28) 358#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED (0<<28)
@@ -4185,6 +4180,15 @@ struct l2_fhdr {
4185#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2) 4180#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI (2L<<2)
4186#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2) 4181#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI (3L<<2)
4187 4182
4183#define BNX2_RLUP_RSS_COMMAND 0x00002048
4184#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR (0xfUL<<0)
4185#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK (0xffUL<<4)
4186#define BNX2_RLUP_RSS_COMMAND_WRITE (1UL<<12)
4187#define BNX2_RLUP_RSS_COMMAND_READ (1UL<<13)
4188#define BNX2_RLUP_RSS_COMMAND_HASH_MASK (0x7UL<<14)
4189
4190#define BNX2_RLUP_RSS_DATA 0x0000204c
4191
4188 4192
4189/* 4193/*
4190 * rbuf_reg definition 4194 * rbuf_reg definition
@@ -6077,6 +6081,7 @@ struct l2_fhdr {
6077 6081
6078#define BNX2_COM_SCRATCH 0x00120000 6082#define BNX2_COM_SCRATCH 0x00120000
6079 6083
6084#define BNX2_FW_RX_LOW_LATENCY 0x00120058
6080#define BNX2_FW_RX_DROP_COUNT 0x00120084 6085#define BNX2_FW_RX_DROP_COUNT 0x00120084
6081 6086
6082 6087
@@ -6497,8 +6502,8 @@ struct l2_fhdr {
6497#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) 6502#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd))
6498#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 6503#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
6499 6504
6500#define MAX_RX_RINGS 4 6505#define MAX_RX_RINGS 8
6501#define MAX_RX_PG_RINGS 16 6506#define MAX_RX_PG_RINGS 32
6502#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) 6507#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd))
6503#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) 6508#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
6504#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS) 6509#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
@@ -6737,10 +6742,6 @@ struct bnx2 {
6737 6742
6738 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC]; 6743 struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
6739 6744
6740#ifdef BCM_VLAN
6741 struct vlan_group *vlgrp;
6742#endif
6743
6744 u32 rx_buf_use_size; /* useable size */ 6745 u32 rx_buf_use_size; /* useable size */
6745 u32 rx_buf_size; /* with alignment */ 6746 u32 rx_buf_size; /* with alignment */
6746 u32 rx_copy_thresh; 6747 u32 rx_copy_thresh;
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 64329c5fbdea..9571ecf48f35 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,26 +20,20 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.52.53-7" 23#define DRV_MODULE_VERSION "1.60.00-3"
24#define DRV_MODULE_RELDATE "2010/09/12" 24#define DRV_MODULE_RELDATE "2010/10/19"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
28#define BCM_VLAN 1
29#endif
30
31#define BNX2X_MULTI_QUEUE 27#define BNX2X_MULTI_QUEUE
32 28
33#define BNX2X_NEW_NAPI 29#define BNX2X_NEW_NAPI
34 30
35 31
36
37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 32#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
38#define BCM_CNIC 1 33#define BCM_CNIC 1
39#include "../cnic_if.h" 34#include "../cnic_if.h"
40#endif 35#endif
41 36
42
43#ifdef BCM_CNIC 37#ifdef BCM_CNIC
44#define BNX2X_MIN_MSIX_VEC_CNT 3 38#define BNX2X_MIN_MSIX_VEC_CNT 3
45#define BNX2X_MSIX_VEC_FP_START 2 39#define BNX2X_MSIX_VEC_FP_START 2
@@ -129,16 +123,18 @@ void bnx2x_panic_dump(struct bnx2x *bp);
129 } while (0) 123 } while (0)
130#endif 124#endif
131 125
126#define bnx2x_mc_addr(ha) ((ha)->addr)
132 127
133#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) 128#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
134#define U64_HI(x) (u32)(((u64)(x)) >> 32) 129#define U64_HI(x) (u32)(((u64)(x)) >> 32)
135#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) 130#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
136 131
137 132
138#define REG_ADDR(bp, offset) (bp->regview + offset) 133#define REG_ADDR(bp, offset) ((bp->regview) + (offset))
139 134
140#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset)) 135#define REG_RD(bp, offset) readl(REG_ADDR(bp, offset))
141#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) 136#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
137#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
142 138
143#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) 139#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
144#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) 140#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
@@ -160,6 +156,9 @@ void bnx2x_panic_dump(struct bnx2x *bp);
160 offset, len32); \ 156 offset, len32); \
161 } while (0) 157 } while (0)
162 158
159#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
160 REG_WR_DMAE(bp, offset, valp, len32)
161
163#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \ 162#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
164 do { \ 163 do { \
165 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \ 164 memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
@@ -175,16 +174,59 @@ void bnx2x_panic_dump(struct bnx2x *bp);
175 offsetof(struct shmem2_region, field)) 174 offsetof(struct shmem2_region, field))
176#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) 175#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
177#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) 176#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
177#define MF_CFG_ADDR(bp, field) (bp->common.mf_cfg_base + \
178 offsetof(struct mf_cfg, field))
179#define MF2_CFG_ADDR(bp, field) (bp->common.mf2_cfg_base + \
180 offsetof(struct mf2_cfg, field))
178 181
179#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field) 182#define MF_CFG_RD(bp, field) REG_RD(bp, MF_CFG_ADDR(bp, field))
180#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val) 183#define MF_CFG_WR(bp, field, val) REG_WR(bp,\
184 MF_CFG_ADDR(bp, field), (val))
185#define MF2_CFG_RD(bp, field) REG_RD(bp, MF2_CFG_ADDR(bp, field))
186
187#define SHMEM2_HAS(bp, field) ((bp)->common.shmem2_base && \
188 (SHMEM2_RD((bp), size) > \
189 offsetof(struct shmem2_region, field)))
181 190
182#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) 191#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
183#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) 192#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
184 193
194/* SP SB indices */
195
196/* General SP events - stats query, cfc delete, etc */
197#define HC_SP_INDEX_ETH_DEF_CONS 3
198
199/* EQ completions */
200#define HC_SP_INDEX_EQ_CONS 7
201
202/* iSCSI L2 */
203#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
204#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
205
206/**
207 * CIDs and CLIDs:
208 * CLIDs below is a CLID for func 0, then the CLID for other
209 * functions will be calculated by the formula:
210 *
211 * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
212 *
213 */
214/* iSCSI L2 */
215#define BNX2X_ISCSI_ETH_CL_ID 17
216#define BNX2X_ISCSI_ETH_CID 17
217
218/** Additional rings budgeting */
219#ifdef BCM_CNIC
220#define CNIC_CONTEXT_USE 1
221#else
222#define CNIC_CONTEXT_USE 0
223#endif /* BCM_CNIC */
224
185#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ 225#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
186 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR 226 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
187 227
228#define SM_RX_ID 0
229#define SM_TX_ID 1
188 230
189/* fast path */ 231/* fast path */
190 232
@@ -254,11 +296,24 @@ union db_prod {
254#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1) 296#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
255#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK) 297#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
256 298
299union host_hc_status_block {
300 /* pointer to fp status block e1x */
301 struct host_hc_status_block_e1x *e1x_sb;
302 /* pointer to fp status block e2 */
303 struct host_hc_status_block_e2 *e2_sb;
304};
257 305
258struct bnx2x_fastpath { 306struct bnx2x_fastpath {
259 307
308#define BNX2X_NAPI_WEIGHT 128
260 struct napi_struct napi; 309 struct napi_struct napi;
261 struct host_status_block *status_blk; 310 union host_hc_status_block status_blk;
311 /* chip independed shortcuts into sb structure */
312 __le16 *sb_index_values;
313 __le16 *sb_running_index;
314 /* chip independed shortcut into rx_prods_offset memory */
315 u32 ustorm_rx_prods_offset;
316
262 dma_addr_t status_blk_mapping; 317 dma_addr_t status_blk_mapping;
263 318
264 struct sw_tx_bd *tx_buf_ring; 319 struct sw_tx_bd *tx_buf_ring;
@@ -288,10 +343,15 @@ struct bnx2x_fastpath {
288#define BNX2X_FP_STATE_OPEN 0xa0000 343#define BNX2X_FP_STATE_OPEN 0xa0000
289#define BNX2X_FP_STATE_HALTING 0xb0000 344#define BNX2X_FP_STATE_HALTING 0xb0000
290#define BNX2X_FP_STATE_HALTED 0xc0000 345#define BNX2X_FP_STATE_HALTED 0xc0000
346#define BNX2X_FP_STATE_TERMINATING 0xd0000
347#define BNX2X_FP_STATE_TERMINATED 0xe0000
291 348
292 u8 index; /* number in fp array */ 349 u8 index; /* number in fp array */
293 u8 cl_id; /* eth client id */ 350 u8 cl_id; /* eth client id */
294 u8 sb_id; /* status block number in HW */ 351 u8 cl_qzone_id;
352 u8 fw_sb_id; /* status block number in FW */
353 u8 igu_sb_id; /* status block number in HW */
354 u32 cid;
295 355
296 union db_prod tx_db; 356 union db_prod tx_db;
297 357
@@ -301,8 +361,7 @@ struct bnx2x_fastpath {
301 u16 tx_bd_cons; 361 u16 tx_bd_cons;
302 __le16 *tx_cons_sb; 362 __le16 *tx_cons_sb;
303 363
304 __le16 fp_c_idx; 364 __le16 fp_hc_idx;
305 __le16 fp_u_idx;
306 365
307 u16 rx_bd_prod; 366 u16 rx_bd_prod;
308 u16 rx_bd_cons; 367 u16 rx_bd_cons;
@@ -312,8 +371,6 @@ struct bnx2x_fastpath {
312 /* The last maximal completed SGE */ 371 /* The last maximal completed SGE */
313 u16 last_max_sge; 372 u16 last_max_sge;
314 __le16 *rx_cons_sb; 373 __le16 *rx_cons_sb;
315 __le16 *rx_bd_cons_sb;
316
317 374
318 unsigned long tx_pkt, 375 unsigned long tx_pkt,
319 rx_pkt, 376 rx_pkt,
@@ -356,6 +413,8 @@ struct bnx2x_fastpath {
356#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS) 413#define NUM_TX_BD (TX_DESC_CNT * NUM_TX_RINGS)
357#define MAX_TX_BD (NUM_TX_BD - 1) 414#define MAX_TX_BD (NUM_TX_BD - 1)
358#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2) 415#define MAX_TX_AVAIL (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
416#define INIT_JUMBO_TX_RING_SIZE MAX_TX_AVAIL
417#define INIT_TX_RING_SIZE MAX_TX_AVAIL
359#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \ 418#define NEXT_TX_IDX(x) ((((x) & MAX_TX_DESC_CNT) == \
360 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1) 419 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
361#define TX_BD(x) ((x) & MAX_TX_BD) 420#define TX_BD(x) ((x) & MAX_TX_BD)
@@ -370,6 +429,8 @@ struct bnx2x_fastpath {
370#define MAX_RX_BD (NUM_RX_BD - 1) 429#define MAX_RX_BD (NUM_RX_BD - 1)
371#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2) 430#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
372#define MIN_RX_AVAIL 128 431#define MIN_RX_AVAIL 128
432#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
433#define INIT_RX_RING_SIZE MAX_RX_AVAIL
373#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \ 434#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
374 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1) 435 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
375#define RX_BD(x) ((x) & MAX_RX_BD) 436#define RX_BD(x) ((x) & MAX_RX_BD)
@@ -420,11 +481,12 @@ struct bnx2x_fastpath {
420 le32_to_cpu((bd)->addr_lo)) 481 le32_to_cpu((bd)->addr_lo))
421#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) 482#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
422 483
423 484#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
485#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
424#define DPM_TRIGER_TYPE 0x40 486#define DPM_TRIGER_TYPE 0x40
425#define DOORBELL(bp, cid, val) \ 487#define DOORBELL(bp, cid, val) \
426 do { \ 488 do { \
427 writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \ 489 writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
428 DPM_TRIGER_TYPE); \ 490 DPM_TRIGER_TYPE); \
429 } while (0) 491 } while (0)
430 492
@@ -482,31 +544,15 @@ struct bnx2x_fastpath {
482#define BNX2X_RX_SUM_FIX(cqe) \ 544#define BNX2X_RX_SUM_FIX(cqe) \
483 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags) 545 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
484 546
485 547#define U_SB_ETH_RX_CQ_INDEX 1
486#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) 548#define U_SB_ETH_RX_BD_INDEX 2
487#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) 549#define C_SB_ETH_TX_CQ_INDEX 5
488
489#define U_SB_ETH_RX_CQ_INDEX HC_INDEX_U_ETH_RX_CQ_CONS
490#define U_SB_ETH_RX_BD_INDEX HC_INDEX_U_ETH_RX_BD_CONS
491#define C_SB_ETH_TX_CQ_INDEX HC_INDEX_C_ETH_TX_CQ_CONS
492 550
493#define BNX2X_RX_SB_INDEX \ 551#define BNX2X_RX_SB_INDEX \
494 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX]) 552 (&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
495
496#define BNX2X_RX_SB_BD_INDEX \
497 (&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])
498
499#define BNX2X_RX_SB_INDEX_NUM \
500 (((U_SB_ETH_RX_CQ_INDEX << \
501 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
502 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
503 ((U_SB_ETH_RX_BD_INDEX << \
504 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
505 USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))
506 553
507#define BNX2X_TX_SB_INDEX \ 554#define BNX2X_TX_SB_INDEX \
508 (&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX]) 555 (&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
509
510 556
511/* end of fast path */ 557/* end of fast path */
512 558
@@ -522,12 +568,19 @@ struct bnx2x_common {
522#define CHIP_NUM_57710 0x164e 568#define CHIP_NUM_57710 0x164e
523#define CHIP_NUM_57711 0x164f 569#define CHIP_NUM_57711 0x164f
524#define CHIP_NUM_57711E 0x1650 570#define CHIP_NUM_57711E 0x1650
571#define CHIP_NUM_57712 0x1662
572#define CHIP_NUM_57712E 0x1663
525#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 573#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
526#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711) 574#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
527#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E) 575#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
576#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
577#define CHIP_IS_57712E(bp) (CHIP_NUM(bp) == CHIP_NUM_57712E)
528#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 578#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
529 CHIP_IS_57711E(bp)) 579 CHIP_IS_57711E(bp))
530#define IS_E1H_OFFSET CHIP_IS_E1H(bp) 580#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
581 CHIP_IS_57712E(bp))
582#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
583#define IS_E1H_OFFSET (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
531 584
532#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000) 585#define CHIP_REV(bp) (bp->common.chip_id & 0x0000f000)
533#define CHIP_REV_Ax 0x00000000 586#define CHIP_REV_Ax 0x00000000
@@ -553,12 +606,34 @@ struct bnx2x_common {
553 606
554 u32 shmem_base; 607 u32 shmem_base;
555 u32 shmem2_base; 608 u32 shmem2_base;
609 u32 mf_cfg_base;
610 u32 mf2_cfg_base;
556 611
557 u32 hw_config; 612 u32 hw_config;
558 613
559 u32 bc_ver; 614 u32 bc_ver;
615
616 u8 int_block;
617#define INT_BLOCK_HC 0
618#define INT_BLOCK_IGU 1
619#define INT_BLOCK_MODE_NORMAL 0
620#define INT_BLOCK_MODE_BW_COMP 2
621#define CHIP_INT_MODE_IS_NBC(bp) \
622 (CHIP_IS_E2(bp) && \
623 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
624#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
625
626 u8 chip_port_mode;
627#define CHIP_4_PORT_MODE 0x0
628#define CHIP_2_PORT_MODE 0x1
629#define CHIP_PORT_MODE_NONE 0x2
630#define CHIP_MODE(bp) (bp->common.chip_port_mode)
631#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
560}; 632};
561 633
634/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
635#define BNX2X_IGU_STAS_MSG_VF_CNT 64
636#define BNX2X_IGU_STAS_MSG_PF_CNT 4
562 637
563/* end of common */ 638/* end of common */
564 639
@@ -590,27 +665,98 @@ struct bnx2x_port {
590 665
591/* end of port */ 666/* end of port */
592 667
668/* e1h Classification CAM line allocations */
669enum {
670 CAM_ETH_LINE = 0,
671 CAM_ISCSI_ETH_LINE,
672 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
673};
593 674
675#define BNX2X_VF_ID_INVALID 0xFF
594 676
595#ifdef BCM_CNIC 677/*
596#define MAX_CONTEXT 15 678 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
597#else 679 * control by the number of fast-path status blocks supported by the
598#define MAX_CONTEXT 16 680 * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
599#endif 681 * status block represents an independent interrupts context that can
682 * serve a regular L2 networking queue. However special L2 queues such
683 * as the FCoE queue do not require a FP-SB and other components like
684 * the CNIC may consume FP-SB reducing the number of possible L2 queues
685 *
686 * If the maximum number of FP-SB available is X then:
687 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
688 * regular L2 queues is Y=X-1
689 * b. in MF mode the actual number of L2 queues is Y= (X-1/MF_factor)
690 * c. If the FCoE L2 queue is supported the actual number of L2 queues
691 * is Y+1
692 * d. The number of irqs (MSIX vectors) is either Y+1 (one extra for
693 * slow-path interrupts) or Y+2 if CNIC is supported (one additional
694 * FP interrupt context for the CNIC).
695 * e. The number of HW context (CID count) is always X or X+1 if FCoE
696 * L2 queue is supported. the cid for the FCoE L2 queue is always X.
697 */
698
699#define FP_SB_MAX_E1x 16 /* fast-path interrupt contexts E1x */
700#define FP_SB_MAX_E2 16 /* fast-path interrupt contexts E2 */
701
702/*
703 * cid_cnt paramter below refers to the value returned by
704 * 'bnx2x_get_l2_cid_count()' routine
705 */
706
707/*
708 * The number of FP context allocated by the driver == max number of regular
709 * L2 queues + 1 for the FCoE L2 queue
710 */
711#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
600 712
601union cdu_context { 713union cdu_context {
602 struct eth_context eth; 714 struct eth_context eth;
603 char pad[1024]; 715 char pad[1024];
604}; 716};
605 717
718/* CDU host DB constants */
719#define CDU_ILT_PAGE_SZ_HW 3
720#define CDU_ILT_PAGE_SZ (4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
721#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
722
723#ifdef BCM_CNIC
724#define CNIC_ISCSI_CID_MAX 256
725#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
726#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
727#endif
728
729#define QM_ILT_PAGE_SZ_HW 3
730#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
731#define QM_CID_ROUND 1024
732
733#ifdef BCM_CNIC
734/* TM (timers) host DB constants */
735#define TM_ILT_PAGE_SZ_HW 2
736#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
737/* #define TM_CONN_NUM (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
738#define TM_CONN_NUM 1024
739#define TM_ILT_SZ (8 * TM_CONN_NUM)
740#define TM_ILT_LINES DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
741
742/* SRC (Searcher) host DB constants */
743#define SRC_ILT_PAGE_SZ_HW 3
744#define SRC_ILT_PAGE_SZ (4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
745#define SRC_HASH_BITS 10
746#define SRC_CONN_NUM (1 << SRC_HASH_BITS) /* 1024 */
747#define SRC_ILT_SZ (sizeof(struct src_ent) * SRC_CONN_NUM)
748#define SRC_T2_SZ SRC_ILT_SZ
749#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
750#endif
751
606#define MAX_DMAE_C 8 752#define MAX_DMAE_C 8
607 753
608/* DMA memory not used in fastpath */ 754/* DMA memory not used in fastpath */
609struct bnx2x_slowpath { 755struct bnx2x_slowpath {
610 union cdu_context context[MAX_CONTEXT];
611 struct eth_stats_query fw_stats; 756 struct eth_stats_query fw_stats;
612 struct mac_configuration_cmd mac_config; 757 struct mac_configuration_cmd mac_config;
613 struct mac_configuration_cmd mcast_config; 758 struct mac_configuration_cmd mcast_config;
759 struct client_init_ramrod_data client_init_data;
614 760
615 /* used by dmae command executer */ 761 /* used by dmae command executer */
616 struct dmae_command dmae[MAX_DMAE_C]; 762 struct dmae_command dmae[MAX_DMAE_C];
@@ -635,52 +781,83 @@ struct bnx2x_slowpath {
635#define MAX_DYNAMIC_ATTN_GRPS 8 781#define MAX_DYNAMIC_ATTN_GRPS 8
636 782
637struct attn_route { 783struct attn_route {
638 u32 sig[4]; 784 u32 sig[5];
639}; 785};
640 786
787struct iro {
788 u32 base;
789 u16 m1;
790 u16 m2;
791 u16 m3;
792 u16 size;
793};
794
795struct hw_context {
796 union cdu_context *vcxt;
797 dma_addr_t cxt_mapping;
798 size_t size;
799};
800
801/* forward */
802struct bnx2x_ilt;
803
641typedef enum { 804typedef enum {
642 BNX2X_RECOVERY_DONE, 805 BNX2X_RECOVERY_DONE,
643 BNX2X_RECOVERY_INIT, 806 BNX2X_RECOVERY_INIT,
644 BNX2X_RECOVERY_WAIT, 807 BNX2X_RECOVERY_WAIT,
645} bnx2x_recovery_state_t; 808} bnx2x_recovery_state_t;
646 809
810/**
811 * Event queue (EQ or event ring) MC hsi
812 * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
813 */
814#define NUM_EQ_PAGES 1
815#define EQ_DESC_CNT_PAGE (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
816#define EQ_DESC_MAX_PAGE (EQ_DESC_CNT_PAGE - 1)
817#define NUM_EQ_DESC (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
818#define EQ_DESC_MASK (NUM_EQ_DESC - 1)
819#define MAX_EQ_AVAIL (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
820
821/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
822#define NEXT_EQ_IDX(x) ((((x) & EQ_DESC_MAX_PAGE) == \
823 (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
824
825/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
826#define EQ_DESC(x) ((x) & EQ_DESC_MASK)
827
828#define BNX2X_EQ_INDEX \
829 (&bp->def_status_blk->sp_sb.\
830 index_values[HC_SP_INDEX_EQ_CONS])
831
647struct bnx2x { 832struct bnx2x {
648 /* Fields used in the tx and intr/napi performance paths 833 /* Fields used in the tx and intr/napi performance paths
649 * are grouped together in the beginning of the structure 834 * are grouped together in the beginning of the structure
650 */ 835 */
651 struct bnx2x_fastpath fp[MAX_CONTEXT]; 836 struct bnx2x_fastpath *fp;
652 void __iomem *regview; 837 void __iomem *regview;
653 void __iomem *doorbells; 838 void __iomem *doorbells;
654#ifdef BCM_CNIC 839 u16 db_size;
655#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
656#else
657#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
658#endif
659 840
660 struct net_device *dev; 841 struct net_device *dev;
661 struct pci_dev *pdev; 842 struct pci_dev *pdev;
662 843
844 struct iro *iro_arr;
845#define IRO (bp->iro_arr)
846
663 atomic_t intr_sem; 847 atomic_t intr_sem;
664 848
665 bnx2x_recovery_state_t recovery_state; 849 bnx2x_recovery_state_t recovery_state;
666 int is_leader; 850 int is_leader;
667#ifdef BCM_CNIC 851 struct msix_entry *msix_table;
668 struct msix_entry msix_table[MAX_CONTEXT+2];
669#else
670 struct msix_entry msix_table[MAX_CONTEXT+1];
671#endif
672#define INT_MODE_INTx 1 852#define INT_MODE_INTx 1
673#define INT_MODE_MSI 2 853#define INT_MODE_MSI 2
674 854
675 int tx_ring_size; 855 int tx_ring_size;
676 856
677#ifdef BCM_VLAN
678 struct vlan_group *vlgrp;
679#endif
680
681 u32 rx_csum; 857 u32 rx_csum;
682 u32 rx_buf_size; 858 u32 rx_buf_size;
683#define ETH_OVREHEAD (ETH_HLEN + 8) /* 8 for CRC + VLAN */ 859/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
860#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
684#define ETH_MIN_PACKET_SIZE 60 861#define ETH_MIN_PACKET_SIZE 60
685#define ETH_MAX_PACKET_SIZE 1500 862#define ETH_MAX_PACKET_SIZE 1500
686#define ETH_MAX_JUMBO_PACKET_SIZE 9600 863#define ETH_MAX_JUMBO_PACKET_SIZE 9600
@@ -689,13 +866,12 @@ struct bnx2x {
689#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \ 866#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
690 L1_CACHE_SHIFT : 8) 867 L1_CACHE_SHIFT : 8)
691#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT) 868#define BNX2X_RX_ALIGN (1 << BNX2X_RX_ALIGN_SHIFT)
869#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
692 870
693 struct host_def_status_block *def_status_blk; 871 struct host_sp_status_block *def_status_blk;
694#define DEF_SB_ID 16 872#define DEF_SB_IGU_ID 16
695 __le16 def_c_idx; 873#define DEF_SB_ID HC_SP_SB_ID
696 __le16 def_u_idx; 874 __le16 def_idx;
697 __le16 def_x_idx;
698 __le16 def_t_idx;
699 __le16 def_att_idx; 875 __le16 def_att_idx;
700 u32 attn_state; 876 u32 attn_state;
701 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; 877 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
@@ -707,10 +883,17 @@ struct bnx2x {
707 struct eth_spe *spq_prod_bd; 883 struct eth_spe *spq_prod_bd;
708 struct eth_spe *spq_last_bd; 884 struct eth_spe *spq_last_bd;
709 __le16 *dsb_sp_prod; 885 __le16 *dsb_sp_prod;
710 u16 spq_left; /* serialize spq */ 886 atomic_t spq_left; /* serialize spq */
711 /* used to synchronize spq accesses */ 887 /* used to synchronize spq accesses */
712 spinlock_t spq_lock; 888 spinlock_t spq_lock;
713 889
890 /* event queue */
891 union event_ring_elem *eq_ring;
892 dma_addr_t eq_mapping;
893 u16 eq_prod;
894 u16 eq_cons;
895 __le16 *eq_cons_sb;
896
714 /* Flags for marking that there is a STAT_QUERY or 897 /* Flags for marking that there is a STAT_QUERY or
715 SET_MAC ramrod pending */ 898 SET_MAC ramrod pending */
716 int stats_pending; 899 int stats_pending;
@@ -729,18 +912,27 @@ struct bnx2x {
729#define USING_DAC_FLAG 0x10 912#define USING_DAC_FLAG 0x10
730#define USING_MSIX_FLAG 0x20 913#define USING_MSIX_FLAG 0x20
731#define USING_MSI_FLAG 0x40 914#define USING_MSI_FLAG 0x40
915
732#define TPA_ENABLE_FLAG 0x80 916#define TPA_ENABLE_FLAG 0x80
733#define NO_MCP_FLAG 0x100 917#define NO_MCP_FLAG 0x100
918#define DISABLE_MSI_FLAG 0x200
734#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 919#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
735#define HW_VLAN_TX_FLAG 0x400
736#define HW_VLAN_RX_FLAG 0x800
737#define MF_FUNC_DIS 0x1000 920#define MF_FUNC_DIS 0x1000
738 921
739 int func; 922 int pf_num; /* absolute PF number */
740#define BP_PORT(bp) (bp->func % PORT_MAX) 923 int pfid; /* per-path PF number */
741#define BP_FUNC(bp) (bp->func) 924 int base_fw_ndsb;
742#define BP_E1HVN(bp) (bp->func >> 1) 925#define BP_PATH(bp) (!CHIP_IS_E2(bp) ? \
926 0 : (bp->pf_num & 1))
927#define BP_PORT(bp) (bp->pfid & 1)
928#define BP_FUNC(bp) (bp->pfid)
929#define BP_ABS_FUNC(bp) (bp->pf_num)
930#define BP_E1HVN(bp) (bp->pfid >> 1)
931#define BP_VN(bp) (CHIP_MODE_IS_4_PORT(bp) ? \
932 0 : BP_E1HVN(bp))
743#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 933#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
934#define BP_FW_MB_IDX(bp) (BP_PORT(bp) +\
935 BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
744 936
745#ifdef BCM_CNIC 937#ifdef BCM_CNIC
746#define BCM_CNIC_CID_START 16 938#define BCM_CNIC_CID_START 16
@@ -770,10 +962,11 @@ struct bnx2x {
770 struct cmng_struct_per_port cmng; 962 struct cmng_struct_per_port cmng;
771 u32 vn_weight_sum; 963 u32 vn_weight_sum;
772 964
773 u32 mf_config; 965 u32 mf_config[E1HVN_MAX];
774 u16 e1hov; 966 u32 mf2_config[E2_FUNC_MAX];
775 u8 e1hmf; 967 u16 mf_ov;
776#define IS_E1HMF(bp) (bp->e1hmf != 0) 968 u8 mf_mode;
969#define IS_MF(bp) (bp->mf_mode != 0)
777 970
778 u8 wol; 971 u8 wol;
779 972
@@ -801,6 +994,7 @@ struct bnx2x {
801#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 994#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
802#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 995#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
803#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 996#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
997#define BNX2X_STATE_FUNC_STARTED 0x7000
804#define BNX2X_STATE_DIAG 0xe000 998#define BNX2X_STATE_DIAG 0xe000
805#define BNX2X_STATE_ERROR 0xf000 999#define BNX2X_STATE_ERROR 0xf000
806 1000
@@ -809,6 +1003,15 @@ struct bnx2x {
809 int disable_tpa; 1003 int disable_tpa;
810 int int_mode; 1004 int int_mode;
811 1005
1006 struct tstorm_eth_mac_filter_config mac_filters;
1007#define BNX2X_ACCEPT_NONE 0x0000
1008#define BNX2X_ACCEPT_UNICAST 0x0001
1009#define BNX2X_ACCEPT_MULTICAST 0x0002
1010#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
1011#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
1012#define BNX2X_ACCEPT_BROADCAST 0x0010
1013#define BNX2X_PROMISCUOUS_MODE 0x10000
1014
812 u32 rx_mode; 1015 u32 rx_mode;
813#define BNX2X_RX_MODE_NONE 0 1016#define BNX2X_RX_MODE_NONE 0
814#define BNX2X_RX_MODE_NORMAL 1 1017#define BNX2X_RX_MODE_NORMAL 1
@@ -817,34 +1020,41 @@ struct bnx2x {
817#define BNX2X_MAX_MULTICAST 64 1020#define BNX2X_MAX_MULTICAST 64
818#define BNX2X_MAX_EMUL_MULTI 16 1021#define BNX2X_MAX_EMUL_MULTI 16
819 1022
820 u32 rx_mode_cl_mask; 1023 u8 igu_dsb_id;
821 1024 u8 igu_base_sb;
1025 u8 igu_sb_cnt;
822 dma_addr_t def_status_blk_mapping; 1026 dma_addr_t def_status_blk_mapping;
823 1027
824 struct bnx2x_slowpath *slowpath; 1028 struct bnx2x_slowpath *slowpath;
825 dma_addr_t slowpath_mapping; 1029 dma_addr_t slowpath_mapping;
1030 struct hw_context context;
1031
1032 struct bnx2x_ilt *ilt;
1033#define BP_ILT(bp) ((bp)->ilt)
1034#define ILT_MAX_LINES 128
1035
1036 int l2_cid_count;
1037#define L2_ILT_LINES(bp) (DIV_ROUND_UP((bp)->l2_cid_count, \
1038 ILT_PAGE_CIDS))
1039#define BNX2X_DB_SIZE(bp) ((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
1040
1041 int qm_cid_count;
826 1042
827 int dropless_fc; 1043 int dropless_fc;
828 1044
829#ifdef BCM_CNIC 1045#ifdef BCM_CNIC
830 u32 cnic_flags; 1046 u32 cnic_flags;
831#define BNX2X_CNIC_FLAG_MAC_SET 1 1047#define BNX2X_CNIC_FLAG_MAC_SET 1
832
833 void *t1;
834 dma_addr_t t1_mapping;
835 void *t2; 1048 void *t2;
836 dma_addr_t t2_mapping; 1049 dma_addr_t t2_mapping;
837 void *timers;
838 dma_addr_t timers_mapping;
839 void *qm;
840 dma_addr_t qm_mapping;
841 struct cnic_ops *cnic_ops; 1050 struct cnic_ops *cnic_ops;
842 void *cnic_data; 1051 void *cnic_data;
843 u32 cnic_tag; 1052 u32 cnic_tag;
844 struct cnic_eth_dev cnic_eth_dev; 1053 struct cnic_eth_dev cnic_eth_dev;
845 struct host_status_block *cnic_sb; 1054 union host_hc_status_block cnic_sb;
846 dma_addr_t cnic_sb_mapping; 1055 dma_addr_t cnic_sb_mapping;
847#define CNIC_SB_ID(bp) BP_L_ID(bp) 1056#define CNIC_SB_ID(bp) ((bp)->base_fw_ndsb + BP_L_ID(bp))
1057#define CNIC_IGU_SB_ID(bp) ((bp)->igu_base_sb)
848 struct eth_spe *cnic_kwq; 1058 struct eth_spe *cnic_kwq;
849 struct eth_spe *cnic_kwq_prod; 1059 struct eth_spe *cnic_kwq_prod;
850 struct eth_spe *cnic_kwq_cons; 1060 struct eth_spe *cnic_kwq_cons;
@@ -914,33 +1124,197 @@ struct bnx2x {
914 const struct firmware *firmware; 1124 const struct firmware *firmware;
915}; 1125};
916 1126
1127/**
1128 * Init queue/func interface
1129 */
1130/* queue init flags */
1131#define QUEUE_FLG_TPA 0x0001
1132#define QUEUE_FLG_CACHE_ALIGN 0x0002
1133#define QUEUE_FLG_STATS 0x0004
1134#define QUEUE_FLG_OV 0x0008
1135#define QUEUE_FLG_VLAN 0x0010
1136#define QUEUE_FLG_COS 0x0020
1137#define QUEUE_FLG_HC 0x0040
1138#define QUEUE_FLG_DHC 0x0080
1139#define QUEUE_FLG_OOO 0x0100
1140
1141#define QUEUE_DROP_IP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
1142#define QUEUE_DROP_TCP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
1143#define QUEUE_DROP_TTL0 TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
1144#define QUEUE_DROP_UDP_CS_ERR TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
1145
1146
1147
1148/* rss capabilities */
1149#define RSS_IPV4_CAP 0x0001
1150#define RSS_IPV4_TCP_CAP 0x0002
1151#define RSS_IPV6_CAP 0x0004
1152#define RSS_IPV6_TCP_CAP 0x0008
917 1153
918#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
919 : MAX_CONTEXT)
920#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1154#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
921#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1155#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
922 1156
1157#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
1158#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
1159
1160#define RSS_IPV4_CAP_MASK \
1161 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
1162
1163#define RSS_IPV4_TCP_CAP_MASK \
1164 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
1165
1166#define RSS_IPV6_CAP_MASK \
1167 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
1168
1169#define RSS_IPV6_TCP_CAP_MASK \
1170 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
1171
1172/* func init flags */
1173#define FUNC_FLG_STATS 0x0001
1174#define FUNC_FLG_TPA 0x0002
1175#define FUNC_FLG_SPQ 0x0004
1176#define FUNC_FLG_LEADING 0x0008 /* PF only */
1177
1178struct rxq_pause_params {
1179 u16 bd_th_lo;
1180 u16 bd_th_hi;
1181 u16 rcq_th_lo;
1182 u16 rcq_th_hi;
1183 u16 sge_th_lo; /* valid iff QUEUE_FLG_TPA */
1184 u16 sge_th_hi; /* valid iff QUEUE_FLG_TPA */
1185 u16 pri_map;
1186};
1187
1188struct bnx2x_rxq_init_params {
1189 /* cxt*/
1190 struct eth_context *cxt;
1191
1192 /* dma */
1193 dma_addr_t dscr_map;
1194 dma_addr_t sge_map;
1195 dma_addr_t rcq_map;
1196 dma_addr_t rcq_np_map;
1197
1198 u16 flags;
1199 u16 drop_flags;
1200 u16 mtu;
1201 u16 buf_sz;
1202 u16 fw_sb_id;
1203 u16 cl_id;
1204 u16 spcl_id;
1205 u16 cl_qzone_id;
1206
1207 /* valid iff QUEUE_FLG_STATS */
1208 u16 stat_id;
1209
1210 /* valid iff QUEUE_FLG_TPA */
1211 u16 tpa_agg_sz;
1212 u16 sge_buf_sz;
1213 u16 max_sges_pkt;
1214
1215 /* valid iff QUEUE_FLG_CACHE_ALIGN */
1216 u8 cache_line_log;
1217
1218 u8 sb_cq_index;
1219 u32 cid;
1220
1221 /* desired interrupts per sec. valid iff QUEUE_FLG_HC */
1222 u32 hc_rate;
1223};
1224
1225struct bnx2x_txq_init_params {
1226 /* cxt*/
1227 struct eth_context *cxt;
1228
1229 /* dma */
1230 dma_addr_t dscr_map;
1231
1232 u16 flags;
1233 u16 fw_sb_id;
1234 u8 sb_cq_index;
1235 u8 cos; /* valid iff QUEUE_FLG_COS */
1236 u16 stat_id; /* valid iff QUEUE_FLG_STATS */
1237 u16 traffic_type;
1238 u32 cid;
1239 u16 hc_rate; /* desired interrupts per sec.*/
1240 /* valid iff QUEUE_FLG_HC */
1241
1242};
1243
1244struct bnx2x_client_ramrod_params {
1245 int *pstate;
1246 int state;
1247 u16 index;
1248 u16 cl_id;
1249 u32 cid;
1250 u8 poll;
1251#define CLIENT_IS_LEADING_RSS 0x02
1252 u8 flags;
1253};
1254
1255struct bnx2x_client_init_params {
1256 struct rxq_pause_params pause;
1257 struct bnx2x_rxq_init_params rxq_params;
1258 struct bnx2x_txq_init_params txq_params;
1259 struct bnx2x_client_ramrod_params ramrod_params;
1260};
1261
1262struct bnx2x_rss_params {
1263 int mode;
1264 u16 cap;
1265 u16 result_mask;
1266};
1267
1268struct bnx2x_func_init_params {
1269
1270 /* rss */
1271 struct bnx2x_rss_params *rss; /* valid iff FUNC_FLG_RSS */
1272
1273 /* dma */
1274 dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
1275 dma_addr_t spq_map; /* valid iff FUNC_FLG_SPQ */
1276
1277 u16 func_flgs;
1278 u16 func_id; /* abs fid */
1279 u16 pf_id;
1280 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1281};
1282
923#define for_each_queue(bp, var) \ 1283#define for_each_queue(bp, var) \
924 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) 1284 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
925#define for_each_nondefault_queue(bp, var) \ 1285#define for_each_nondefault_queue(bp, var) \
926 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) 1286 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
927 1287
928 1288
1289#define WAIT_RAMROD_POLL 0x01
1290#define WAIT_RAMROD_COMMON 0x02
1291int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
1292 int *state_p, int flags);
1293
1294/* dmae */
929void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 1295void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
930void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 1296void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
931 u32 len32); 1297 u32 len32);
1298void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
1299 u32 addr, u32 len);
1300void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1301u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
1302u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
1303u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
1304 bool with_comp, u8 comp_type);
1305
932int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port); 1306int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
933int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 1307int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
934int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port); 1308int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
935u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); 1309u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
936void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); 1310void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
937void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 1311
938 u32 addr, u32 len);
939void bnx2x_calc_fc_adv(struct bnx2x *bp); 1312void bnx2x_calc_fc_adv(struct bnx2x *bp);
940int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 1313int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
941 u32 data_hi, u32 data_lo, int common); 1314 u32 data_hi, u32 data_lo, int common);
942void bnx2x_update_coalesce(struct bnx2x *bp); 1315void bnx2x_update_coalesce(struct bnx2x *bp);
943int bnx2x_get_link_cfg_idx(struct bnx2x *bp); 1316int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
1317
944static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 1318static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
945 int wait) 1319 int wait)
946{ 1320{
@@ -958,6 +1332,40 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
958 return val; 1332 return val;
959} 1333}
960 1334
1335#define BNX2X_ILT_ZALLOC(x, y, size) \
1336 do { \
1337 x = pci_alloc_consistent(bp->pdev, size, y); \
1338 if (x) \
1339 memset(x, 0, size); \
1340 } while (0)
1341
1342#define BNX2X_ILT_FREE(x, y, size) \
1343 do { \
1344 if (x) { \
1345 pci_free_consistent(bp->pdev, size, x, y); \
1346 x = NULL; \
1347 y = 0; \
1348 } \
1349 } while (0)
1350
1351#define ILOG2(x) (ilog2((x)))
1352
1353#define ILT_NUM_PAGE_ENTRIES (3072)
1354/* In 57710/11 we use whole table since we have 8 func
1355 * In 57712 we have only 4 func, but use same size per func, then only half of
1356 * the table in use
1357 */
1358#define ILT_PER_FUNC (ILT_NUM_PAGE_ENTRIES/8)
1359
1360#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
1361/*
1362 * the phys address is shifted right 12 bits and has an added
1363 * 1=valid bit added to the 53rd bit
1364 * then since this is a wide register(TM)
1365 * we split it into two 32 bit writes
1366 */
1367#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
1368#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
961 1369
962/* load/unload mode */ 1370/* load/unload mode */
963#define LOAD_NORMAL 0 1371#define LOAD_NORMAL 0
@@ -965,18 +1373,44 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
965#define LOAD_DIAG 2 1373#define LOAD_DIAG 2
966#define UNLOAD_NORMAL 0 1374#define UNLOAD_NORMAL 0
967#define UNLOAD_CLOSE 1 1375#define UNLOAD_CLOSE 1
968#define UNLOAD_RECOVERY 2 1376#define UNLOAD_RECOVERY 2
969 1377
970 1378
971/* DMAE command defines */ 1379/* DMAE command defines */
972#define DMAE_CMD_SRC_PCI 0 1380#define DMAE_TIMEOUT -1
973#define DMAE_CMD_SRC_GRC DMAE_COMMAND_SRC 1381#define DMAE_PCI_ERROR -2 /* E2 and onward */
1382#define DMAE_NOT_RDY -3
1383#define DMAE_PCI_ERR_FLAG 0x80000000
1384
1385#define DMAE_SRC_PCI 0
1386#define DMAE_SRC_GRC 1
1387
1388#define DMAE_DST_NONE 0
1389#define DMAE_DST_PCI 1
1390#define DMAE_DST_GRC 2
1391
1392#define DMAE_COMP_PCI 0
1393#define DMAE_COMP_GRC 1
1394
1395/* E2 and onward - PCI error handling in the completion */
974 1396
975#define DMAE_CMD_DST_PCI (1 << DMAE_COMMAND_DST_SHIFT) 1397#define DMAE_COMP_REGULAR 0
976#define DMAE_CMD_DST_GRC (2 << DMAE_COMMAND_DST_SHIFT) 1398#define DMAE_COM_SET_ERR 1
977 1399
978#define DMAE_CMD_C_DST_PCI 0 1400#define DMAE_CMD_SRC_PCI (DMAE_SRC_PCI << \
979#define DMAE_CMD_C_DST_GRC (1 << DMAE_COMMAND_C_DST_SHIFT) 1401 DMAE_COMMAND_SRC_SHIFT)
1402#define DMAE_CMD_SRC_GRC (DMAE_SRC_GRC << \
1403 DMAE_COMMAND_SRC_SHIFT)
1404
1405#define DMAE_CMD_DST_PCI (DMAE_DST_PCI << \
1406 DMAE_COMMAND_DST_SHIFT)
1407#define DMAE_CMD_DST_GRC (DMAE_DST_GRC << \
1408 DMAE_COMMAND_DST_SHIFT)
1409
1410#define DMAE_CMD_C_DST_PCI (DMAE_COMP_PCI << \
1411 DMAE_COMMAND_C_DST_SHIFT)
1412#define DMAE_CMD_C_DST_GRC (DMAE_COMP_GRC << \
1413 DMAE_COMMAND_C_DST_SHIFT)
980 1414
981#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE 1415#define DMAE_CMD_C_ENABLE DMAE_COMMAND_C_TYPE_ENABLE
982 1416
@@ -992,10 +1426,20 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
992#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET 1426#define DMAE_CMD_DST_RESET DMAE_COMMAND_DST_RESET
993#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT 1427#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
994 1428
1429#define DMAE_SRC_PF 0
1430#define DMAE_SRC_VF 1
1431
1432#define DMAE_DST_PF 0
1433#define DMAE_DST_VF 1
1434
1435#define DMAE_C_SRC 0
1436#define DMAE_C_DST 1
1437
995#define DMAE_LEN32_RD_MAX 0x80 1438#define DMAE_LEN32_RD_MAX 0x80
996#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000) 1439#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
997 1440
998#define DMAE_COMP_VAL 0xe0d0d0ae 1441#define DMAE_COMP_VAL 0x60d0d0ae /* E2 and on - upper bit
1442 indicates eror */
999 1443
1000#define MAX_DMAE_C_PER_PORT 8 1444#define MAX_DMAE_C_PER_PORT 8
1001#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1445#define INIT_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
@@ -1003,7 +1447,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1003#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \ 1447#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
1004 E1HVN_MAX) 1448 E1HVN_MAX)
1005 1449
1006
1007/* PCIE link and speed */ 1450/* PCIE link and speed */
1008#define PCICFG_LINK_WIDTH 0x1f00000 1451#define PCICFG_LINK_WIDTH 0x1f00000
1009#define PCICFG_LINK_WIDTH_SHIFT 20 1452#define PCICFG_LINK_WIDTH_SHIFT 20
@@ -1032,7 +1475,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1032#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) 1475#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1033 1476
1034 1477
1035#define BNX2X_BTR 1 1478#define BNX2X_BTR 4
1036#define MAX_SPQ_PENDING 8 1479#define MAX_SPQ_PENDING 8
1037 1480
1038 1481
@@ -1149,20 +1592,26 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1149 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT)) 1592 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
1150#define MULTI_MASK 0x7f 1593#define MULTI_MASK 0x7f
1151 1594
1595#define BNX2X_SP_DSB_INDEX \
1596 (&bp->def_status_blk->sp_sb.\
1597 index_values[HC_SP_INDEX_ETH_DEF_CONS])
1152 1598
1153#define DEF_USB_FUNC_OFF (2 + 2*HC_USTORM_DEF_SB_NUM_INDICES) 1599#define SET_FLAG(value, mask, flag) \
1154#define DEF_CSB_FUNC_OFF (2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES) 1600 do {\
1155#define DEF_XSB_FUNC_OFF (2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES) 1601 (value) &= ~(mask);\
1156#define DEF_TSB_FUNC_OFF (2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES) 1602 (value) |= ((flag) << (mask##_SHIFT));\
1603 } while (0)
1157 1604
1158#define C_DEF_SB_SP_INDEX HC_INDEX_DEF_C_ETH_SLOW_PATH 1605#define GET_FLAG(value, mask) \
1159 1606 (((value) &= (mask)) >> (mask##_SHIFT))
1160#define BNX2X_SP_DSB_INDEX \
1161(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])
1162 1607
1608#define GET_FIELD(value, fname) \
1609 (((value) & (fname##_MASK)) >> (fname##_SHIFT))
1163 1610
1164#define CAM_IS_INVALID(x) \ 1611#define CAM_IS_INVALID(x) \
1165(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) 1612 (GET_FLAG(x.flags, \
1613 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
1614 (T_ETH_MAC_COMMAND_INVALIDATE))
1166 1615
1167#define CAM_INVALIDATE(x) \ 1616#define CAM_INVALIDATE(x) \
1168 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE) 1617 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
@@ -1178,21 +1627,29 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1178#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 1627#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1179#endif 1628#endif
1180 1629
1630#ifndef ETH_MAX_RX_CLIENTS_E2
1631#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
1632#endif
1633
1181#define BNX2X_VPD_LEN 128 1634#define BNX2X_VPD_LEN 128
1182#define VENDOR_ID_LEN 4 1635#define VENDOR_ID_LEN 4
1183 1636
1637/* Congestion management fairness mode */
1638#define CMNG_FNS_NONE 0
1639#define CMNG_FNS_MINMAX 1
1640
1641#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
1642#define HC_SEG_ACCESS_ATTN 4
1643#define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/
1644
1184#ifdef BNX2X_MAIN 1645#ifdef BNX2X_MAIN
1185#define BNX2X_EXTERN 1646#define BNX2X_EXTERN
1186#else 1647#else
1187#define BNX2X_EXTERN extern 1648#define BNX2X_EXTERN extern
1188#endif 1649#endif
1189 1650
1190BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */ 1651BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
1191
1192/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1193 1652
1194extern void bnx2x_set_ethtool_ops(struct net_device *netdev); 1653extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
1195 1654
1196void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
1197
1198#endif /* bnx2x.h */ 1655#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index efc7be4aefb5..bc5837514074 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -15,19 +15,16 @@
15 * 15 *
16 */ 16 */
17 17
18
19#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/if_vlan.h>
20#include <linux/ip.h> 20#include <linux/ip.h>
21#include <linux/ipv6.h> 21#include <net/ipv6.h>
22#include <net/ip6_checksum.h> 22#include <net/ip6_checksum.h>
23#include <linux/firmware.h> 23#include <linux/firmware.h>
24#include "bnx2x_cmn.h" 24#include "bnx2x_cmn.h"
25 25
26#ifdef BCM_VLAN 26#include "bnx2x_init.h"
27#include <linux/if_vlan.h>
28#endif
29 27
30static int bnx2x_poll(struct napi_struct *napi, int budget);
31 28
32/* free skb in the packet ring at pos idx 29/* free skb in the packet ring at pos idx
33 * return idx of last bd freed 30 * return idx of last bd freed
@@ -52,7 +49,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
52 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); 49 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
53 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; 50 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
54 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), 51 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
55 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); 52 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
56 53
57 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 54 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
58#ifdef BNX2X_STOP_ON_ERROR 55#ifdef BNX2X_STOP_ON_ERROR
@@ -116,16 +113,10 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
116 113
117 pkt_cons = TX_BD(sw_cons); 114 pkt_cons = TX_BD(sw_cons);
118 115
119 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */ 116 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
120 117 " pkt_cons %u\n",
121 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n", 118 fp->index, hw_cons, sw_cons, pkt_cons);
122 hw_cons, sw_cons, pkt_cons);
123 119
124/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
125 rmb();
126 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
127 }
128*/
129 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); 120 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
130 sw_cons++; 121 sw_cons++;
131 } 122 }
@@ -141,7 +132,6 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp)
141 */ 132 */
142 smp_mb(); 133 smp_mb();
143 134
144 /* TBD need a thresh? */
145 if (unlikely(netif_tx_queue_stopped(txq))) { 135 if (unlikely(netif_tx_queue_stopped(txq))) {
146 /* Taking tx_lock() is needed to prevent reenabling the queue 136 /* Taking tx_lock() is needed to prevent reenabling the queue
147 * while it's empty. This could have happen if rx_action() gets 137 * while it's empty. This could have happen if rx_action() gets
@@ -190,14 +180,16 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
190 180
191 /* First mark all used pages */ 181 /* First mark all used pages */
192 for (i = 0; i < sge_len; i++) 182 for (i = 0; i < sge_len; i++)
193 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i]))); 183 SGE_MASK_CLEAR_BIT(fp,
184 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
194 185
195 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", 186 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
196 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); 187 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
197 188
198 /* Here we assume that the last SGE index is the biggest */ 189 /* Here we assume that the last SGE index is the biggest */
199 prefetch((void *)(fp->sge_mask)); 190 prefetch((void *)(fp->sge_mask));
200 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1])); 191 bnx2x_update_last_max_sge(fp,
192 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
201 193
202 last_max = RX_SGE(fp->last_max_sge); 194 last_max = RX_SGE(fp->last_max_sge);
203 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; 195 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
@@ -298,7 +290,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
298 290
299 /* Run through the SGL and compose the fragmented skb */ 291 /* Run through the SGL and compose the fragmented skb */
300 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { 292 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
301 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j])); 293 u16 sge_idx =
294 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
302 295
303 /* FW gives the indices of the SGE as if the ring is an array 296 /* FW gives the indices of the SGE as if the ring is an array
304 (meaning that "next" element will consume 2 indices) */ 297 (meaning that "next" element will consume 2 indices) */
@@ -350,16 +343,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
350 if (likely(new_skb)) { 343 if (likely(new_skb)) {
351 /* fix ip xsum and give it to the stack */ 344 /* fix ip xsum and give it to the stack */
352 /* (no need to map the new skb) */ 345 /* (no need to map the new skb) */
353#ifdef BCM_VLAN
354 int is_vlan_cqe =
355 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
356 PARSING_FLAGS_VLAN);
357 int is_not_hwaccel_vlan_cqe =
358 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
359#endif
360 346
361 prefetch(skb); 347 prefetch(skb);
362 prefetch(((char *)(skb)) + 128); 348 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
363 349
364#ifdef BNX2X_STOP_ON_ERROR 350#ifdef BNX2X_STOP_ON_ERROR
365 if (pad + len > bp->rx_buf_size) { 351 if (pad + len > bp->rx_buf_size) {
@@ -381,27 +367,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
381 struct iphdr *iph; 367 struct iphdr *iph;
382 368
383 iph = (struct iphdr *)skb->data; 369 iph = (struct iphdr *)skb->data;
384#ifdef BCM_VLAN
385 /* If there is no Rx VLAN offloading -
386 take VLAN tag into an account */
387 if (unlikely(is_not_hwaccel_vlan_cqe))
388 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
389#endif
390 iph->check = 0; 370 iph->check = 0;
391 iph->check = ip_fast_csum((u8 *)iph, iph->ihl); 371 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
392 } 372 }
393 373
394 if (!bnx2x_fill_frag_skb(bp, fp, skb, 374 if (!bnx2x_fill_frag_skb(bp, fp, skb,
395 &cqe->fast_path_cqe, cqe_idx)) { 375 &cqe->fast_path_cqe, cqe_idx)) {
396#ifdef BCM_VLAN 376 if ((le16_to_cpu(cqe->fast_path_cqe.
397 if ((bp->vlgrp != NULL) && is_vlan_cqe && 377 pars_flags.flags) & PARSING_FLAGS_VLAN))
398 (!is_not_hwaccel_vlan_cqe)) 378 __vlan_hwaccel_put_tag(skb,
399 vlan_gro_receive(&fp->napi, bp->vlgrp,
400 le16_to_cpu(cqe->fast_path_cqe. 379 le16_to_cpu(cqe->fast_path_cqe.
401 vlan_tag), skb); 380 vlan_tag));
402 else 381 napi_gro_receive(&fp->napi, skb);
403#endif
404 napi_gro_receive(&fp->napi, skb);
405 } else { 382 } else {
406 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" 383 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
407 " - dropping packet!\n"); 384 " - dropping packet!\n");
@@ -510,8 +487,11 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
510 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 487 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
511 pad = cqe->fast_path_cqe.placement_offset; 488 pad = cqe->fast_path_cqe.placement_offset;
512 489
513 /* If CQE is marked both TPA_START and TPA_END 490 /* - If CQE is marked both TPA_START and TPA_END it is
514 it is a non-TPA CQE */ 491 * a non-TPA CQE.
492 * - FP CQE will always have either TPA_START or/and
493 * TPA_STOP flags set.
494 */
515 if ((!fp->disable_tpa) && 495 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) != 496 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) { 497 (TPA_TYPE_START | TPA_TYPE_END))) {
@@ -529,9 +509,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
529 bnx2x_set_skb_rxhash(bp, cqe, skb); 509 bnx2x_set_skb_rxhash(bp, cqe, skb);
530 510
531 goto next_rx; 511 goto next_rx;
532 } 512 } else { /* TPA_STOP */
533
534 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
535 DP(NETIF_MSG_RX_STATUS, 513 DP(NETIF_MSG_RX_STATUS,
536 "calling tpa_stop on queue %d\n", 514 "calling tpa_stop on queue %d\n",
537 queue); 515 queue);
@@ -561,7 +539,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
561 dma_unmap_addr(rx_buf, mapping), 539 dma_unmap_addr(rx_buf, mapping),
562 pad + RX_COPY_THRESH, 540 pad + RX_COPY_THRESH,
563 DMA_FROM_DEVICE); 541 DMA_FROM_DEVICE);
564 prefetch(((char *)(skb)) + 128); 542 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
565 543
566 /* is this an error packet? */ 544 /* is this an error packet? */
567 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { 545 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
@@ -595,7 +573,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
595 skb_reserve(new_skb, pad); 573 skb_reserve(new_skb, pad);
596 skb_put(new_skb, len); 574 skb_put(new_skb, len);
597 575
598 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); 576 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
599 577
600 skb = new_skb; 578 skb = new_skb;
601 579
@@ -614,7 +592,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
614 "of alloc failure\n"); 592 "of alloc failure\n");
615 fp->eth_q_stats.rx_skb_alloc_failed++; 593 fp->eth_q_stats.rx_skb_alloc_failed++;
616reuse_rx: 594reuse_rx:
617 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); 595 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
618 goto next_rx; 596 goto next_rx;
619 } 597 }
620 598
@@ -624,6 +602,7 @@ reuse_rx:
624 bnx2x_set_skb_rxhash(bp, cqe, skb); 602 bnx2x_set_skb_rxhash(bp, cqe, skb);
625 603
626 skb_checksum_none_assert(skb); 604 skb_checksum_none_assert(skb);
605
627 if (bp->rx_csum) { 606 if (bp->rx_csum) {
628 if (likely(BNX2X_RX_CSUM_OK(cqe))) 607 if (likely(BNX2X_RX_CSUM_OK(cqe)))
629 skb->ip_summed = CHECKSUM_UNNECESSARY; 608 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -634,15 +613,11 @@ reuse_rx:
634 613
635 skb_record_rx_queue(skb, fp->index); 614 skb_record_rx_queue(skb, fp->index);
636 615
637#ifdef BCM_VLAN 616 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
638 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && 617 PARSING_FLAGS_VLAN)
639 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & 618 __vlan_hwaccel_put_tag(skb,
640 PARSING_FLAGS_VLAN)) 619 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
641 vlan_gro_receive(&fp->napi, bp->vlgrp, 620 napi_gro_receive(&fp->napi, skb);
642 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
643 else
644#endif
645 napi_gro_receive(&fp->napi, skb);
646 621
647 622
648next_rx: 623next_rx:
@@ -686,9 +661,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
686 return IRQ_HANDLED; 661 return IRQ_HANDLED;
687 } 662 }
688 663
689 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", 664 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
690 fp->index, fp->sb_id); 665 "[fp %d fw_sd %d igusb %d]\n",
691 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); 666 fp->index, fp->fw_sb_id, fp->igu_sb_id);
667 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
692 668
693#ifdef BNX2X_STOP_ON_ERROR 669#ifdef BNX2X_STOP_ON_ERROR
694 if (unlikely(bp->panic)) 670 if (unlikely(bp->panic))
@@ -698,14 +674,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
698 /* Handle Rx and Tx according to MSI-X vector */ 674 /* Handle Rx and Tx according to MSI-X vector */
699 prefetch(fp->rx_cons_sb); 675 prefetch(fp->rx_cons_sb);
700 prefetch(fp->tx_cons_sb); 676 prefetch(fp->tx_cons_sb);
701 prefetch(&fp->status_blk->u_status_block.status_block_index); 677 prefetch(&fp->sb_running_index[SM_RX_ID]);
702 prefetch(&fp->status_blk->c_status_block.status_block_index);
703 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 678 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
704 679
705 return IRQ_HANDLED; 680 return IRQ_HANDLED;
706} 681}
707 682
708
709/* HW Lock for shared dual port PHYs */ 683/* HW Lock for shared dual port PHYs */
710void bnx2x_acquire_phy_lock(struct bnx2x *bp) 684void bnx2x_acquire_phy_lock(struct bnx2x *bp)
711{ 685{
@@ -739,12 +713,13 @@ void bnx2x_link_report(struct bnx2x *bp)
739 netdev_info(bp->dev, "NIC Link is Up, "); 713 netdev_info(bp->dev, "NIC Link is Up, ");
740 714
741 line_speed = bp->link_vars.line_speed; 715 line_speed = bp->link_vars.line_speed;
742 if (IS_E1HMF(bp)) { 716 if (IS_MF(bp)) {
743 u16 vn_max_rate; 717 u16 vn_max_rate;
744 718
745 vn_max_rate = 719 vn_max_rate =
746 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> 720 ((bp->mf_config[BP_VN(bp)] &
747 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 721 FUNC_MF_CFG_MAX_BW_MASK) >>
722 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
748 if (vn_max_rate < line_speed) 723 if (vn_max_rate < line_speed)
749 line_speed = vn_max_rate; 724 line_speed = vn_max_rate;
750 } 725 }
@@ -774,27 +749,73 @@ void bnx2x_link_report(struct bnx2x *bp)
774 } 749 }
775} 750}
776 751
752/* Returns the number of actually allocated BDs */
753static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
754 int rx_ring_size)
755{
756 struct bnx2x *bp = fp->bp;
757 u16 ring_prod, cqe_ring_prod;
758 int i;
759
760 fp->rx_comp_cons = 0;
761 cqe_ring_prod = ring_prod = 0;
762 for (i = 0; i < rx_ring_size; i++) {
763 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
764 BNX2X_ERR("was only able to allocate "
765 "%d rx skbs on queue[%d]\n", i, fp->index);
766 fp->eth_q_stats.rx_skb_alloc_failed++;
767 break;
768 }
769 ring_prod = NEXT_RX_IDX(ring_prod);
770 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
771 WARN_ON(ring_prod <= i);
772 }
773
774 fp->rx_bd_prod = ring_prod;
775 /* Limit the CQE producer by the CQE ring size */
776 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
777 cqe_ring_prod);
778 fp->rx_pkt = fp->rx_calls = 0;
779
780 return i;
781}
782
783static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
784{
785 struct bnx2x *bp = fp->bp;
786 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
787 MAX_RX_AVAIL/bp->num_queues;
788
789 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
790
791 bnx2x_alloc_rx_bds(fp, rx_ring_size);
792
793 /* Warning!
794 * this will generate an interrupt (to the TSTORM)
795 * must only be done after chip is initialized
796 */
797 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
798 fp->rx_sge_prod);
799}
800
777void bnx2x_init_rx_rings(struct bnx2x *bp) 801void bnx2x_init_rx_rings(struct bnx2x *bp)
778{ 802{
779 int func = BP_FUNC(bp); 803 int func = BP_FUNC(bp);
780 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : 804 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
781 ETH_MAX_AGGREGATION_QUEUES_E1H; 805 ETH_MAX_AGGREGATION_QUEUES_E1H;
782 u16 ring_prod, cqe_ring_prod; 806 u16 ring_prod;
783 int i, j; 807 int i, j;
784 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
785 MAX_RX_AVAIL/bp->num_queues;
786 808
787 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size); 809 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
810 IP_HEADER_ALIGNMENT_PADDING;
788 811
789 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
790 DP(NETIF_MSG_IFUP, 812 DP(NETIF_MSG_IFUP,
791 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); 813 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
792 814
793 if (bp->flags & TPA_ENABLE_FLAG) { 815 for_each_queue(bp, j) {
794 816 struct bnx2x_fastpath *fp = &bp->fp[j];
795 for_each_queue(bp, j) {
796 struct bnx2x_fastpath *fp = &bp->fp[j];
797 817
818 if (!fp->disable_tpa) {
798 for (i = 0; i < max_agg_queues; i++) { 819 for (i = 0; i < max_agg_queues; i++) {
799 fp->tpa_pool[i].skb = 820 fp->tpa_pool[i].skb =
800 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 821 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
@@ -812,6 +833,35 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
812 mapping, 0); 833 mapping, 0);
813 fp->tpa_state[i] = BNX2X_TPA_STOP; 834 fp->tpa_state[i] = BNX2X_TPA_STOP;
814 } 835 }
836
837 /* "next page" elements initialization */
838 bnx2x_set_next_page_sgl(fp);
839
840 /* set SGEs bit mask */
841 bnx2x_init_sge_ring_bit_mask(fp);
842
843 /* Allocate SGEs and initialize the ring elements */
844 for (i = 0, ring_prod = 0;
845 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
846
847 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
848 BNX2X_ERR("was only able to allocate "
849 "%d rx sges\n", i);
850 BNX2X_ERR("disabling TPA for"
851 " queue[%d]\n", j);
852 /* Cleanup already allocated elements */
853 bnx2x_free_rx_sge_range(bp,
854 fp, ring_prod);
855 bnx2x_free_tpa_pool(bp,
856 fp, max_agg_queues);
857 fp->disable_tpa = 1;
858 ring_prod = 0;
859 break;
860 }
861 ring_prod = NEXT_SGE_IDX(ring_prod);
862 }
863
864 fp->rx_sge_prod = ring_prod;
815 } 865 }
816 } 866 }
817 867
@@ -819,109 +869,29 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
819 struct bnx2x_fastpath *fp = &bp->fp[j]; 869 struct bnx2x_fastpath *fp = &bp->fp[j];
820 870
821 fp->rx_bd_cons = 0; 871 fp->rx_bd_cons = 0;
822 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
823 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
824
825 /* "next page" elements initialization */
826 /* SGE ring */
827 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
828 struct eth_rx_sge *sge;
829
830 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
831 sge->addr_hi =
832 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
833 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
834 sge->addr_lo =
835 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
836 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
837 }
838
839 bnx2x_init_sge_ring_bit_mask(fp);
840 872
841 /* RX BD ring */ 873 bnx2x_set_next_page_rx_bd(fp);
842 for (i = 1; i <= NUM_RX_RINGS; i++) {
843 struct eth_rx_bd *rx_bd;
844
845 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
846 rx_bd->addr_hi =
847 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
848 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
849 rx_bd->addr_lo =
850 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
851 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
852 }
853 874
854 /* CQ ring */ 875 /* CQ ring */
855 for (i = 1; i <= NUM_RCQ_RINGS; i++) { 876 bnx2x_set_next_page_rx_cq(fp);
856 struct eth_rx_cqe_next_page *nextpg;
857
858 nextpg = (struct eth_rx_cqe_next_page *)
859 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
860 nextpg->addr_hi =
861 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
862 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
863 nextpg->addr_lo =
864 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
865 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
866 }
867
868 /* Allocate SGEs and initialize the ring elements */
869 for (i = 0, ring_prod = 0;
870 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
871
872 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
873 BNX2X_ERR("was only able to allocate "
874 "%d rx sges\n", i);
875 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
876 /* Cleanup already allocated elements */
877 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
878 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
879 fp->disable_tpa = 1;
880 ring_prod = 0;
881 break;
882 }
883 ring_prod = NEXT_SGE_IDX(ring_prod);
884 }
885 fp->rx_sge_prod = ring_prod;
886 877
887 /* Allocate BDs and initialize BD ring */ 878 /* Allocate BDs and initialize BD ring */
888 fp->rx_comp_cons = 0; 879 bnx2x_alloc_rx_bd_ring(fp);
889 cqe_ring_prod = ring_prod = 0;
890 for (i = 0; i < rx_ring_size; i++) {
891 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
892 BNX2X_ERR("was only able to allocate "
893 "%d rx skbs on queue[%d]\n", i, j);
894 fp->eth_q_stats.rx_skb_alloc_failed++;
895 break;
896 }
897 ring_prod = NEXT_RX_IDX(ring_prod);
898 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
899 WARN_ON(ring_prod <= i);
900 }
901 880
902 fp->rx_bd_prod = ring_prod;
903 /* must not have more available CQEs than BDs */
904 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
905 cqe_ring_prod);
906 fp->rx_pkt = fp->rx_calls = 0;
907
908 /* Warning!
909 * this will generate an interrupt (to the TSTORM)
910 * must only be done after chip is initialized
911 */
912 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
913 fp->rx_sge_prod);
914 if (j != 0) 881 if (j != 0)
915 continue; 882 continue;
916 883
917 REG_WR(bp, BAR_USTRORM_INTMEM + 884 if (!CHIP_IS_E2(bp)) {
918 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), 885 REG_WR(bp, BAR_USTRORM_INTMEM +
919 U64_LO(fp->rx_comp_mapping)); 886 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
920 REG_WR(bp, BAR_USTRORM_INTMEM + 887 U64_LO(fp->rx_comp_mapping));
921 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4, 888 REG_WR(bp, BAR_USTRORM_INTMEM +
922 U64_HI(fp->rx_comp_mapping)); 889 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
890 U64_HI(fp->rx_comp_mapping));
891 }
923 } 892 }
924} 893}
894
925static void bnx2x_free_tx_skbs(struct bnx2x *bp) 895static void bnx2x_free_tx_skbs(struct bnx2x *bp)
926{ 896{
927 int i; 897 int i;
@@ -994,55 +964,49 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
994 } 964 }
995} 965}
996 966
997void bnx2x_free_irq(struct bnx2x *bp, bool disable_only) 967void bnx2x_free_irq(struct bnx2x *bp)
998{ 968{
999 if (bp->flags & USING_MSIX_FLAG) { 969 if (bp->flags & USING_MSIX_FLAG)
1000 if (!disable_only) 970 bnx2x_free_msix_irqs(bp);
1001 bnx2x_free_msix_irqs(bp); 971 else if (bp->flags & USING_MSI_FLAG)
1002 pci_disable_msix(bp->pdev); 972 free_irq(bp->pdev->irq, bp->dev);
1003 bp->flags &= ~USING_MSIX_FLAG; 973 else
1004
1005 } else if (bp->flags & USING_MSI_FLAG) {
1006 if (!disable_only)
1007 free_irq(bp->pdev->irq, bp->dev);
1008 pci_disable_msi(bp->pdev);
1009 bp->flags &= ~USING_MSI_FLAG;
1010
1011 } else if (!disable_only)
1012 free_irq(bp->pdev->irq, bp->dev); 974 free_irq(bp->pdev->irq, bp->dev);
1013} 975}
1014 976
1015static int bnx2x_enable_msix(struct bnx2x *bp) 977int bnx2x_enable_msix(struct bnx2x *bp)
1016{ 978{
1017 int i, rc, offset = 1; 979 int msix_vec = 0, i, rc, req_cnt;
1018 int igu_vec = 0;
1019 980
1020 bp->msix_table[0].entry = igu_vec; 981 bp->msix_table[msix_vec].entry = msix_vec;
1021 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); 982 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
983 bp->msix_table[0].entry);
984 msix_vec++;
1022 985
1023#ifdef BCM_CNIC 986#ifdef BCM_CNIC
1024 igu_vec = BP_L_ID(bp) + offset; 987 bp->msix_table[msix_vec].entry = msix_vec;
1025 bp->msix_table[1].entry = igu_vec; 988 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1026 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec); 989 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1027 offset++; 990 msix_vec++;
1028#endif 991#endif
1029 for_each_queue(bp, i) { 992 for_each_queue(bp, i) {
1030 igu_vec = BP_L_ID(bp) + offset + i; 993 bp->msix_table[msix_vec].entry = msix_vec;
1031 bp->msix_table[i + offset].entry = igu_vec;
1032 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " 994 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1033 "(fastpath #%u)\n", i + offset, igu_vec, i); 995 "(fastpath #%u)\n", msix_vec, msix_vec, i);
996 msix_vec++;
1034 } 997 }
1035 998
1036 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 999 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1037 BNX2X_NUM_QUEUES(bp) + offset); 1000
1001 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1038 1002
1039 /* 1003 /*
1040 * reconfigure number of tx/rx queues according to available 1004 * reconfigure number of tx/rx queues according to available
1041 * MSI-X vectors 1005 * MSI-X vectors
1042 */ 1006 */
1043 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1007 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1044 /* vectors available for FP */ 1008 /* how less vectors we will have? */
1045 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START; 1009 int diff = req_cnt - rc;
1046 1010
1047 DP(NETIF_MSG_IFUP, 1011 DP(NETIF_MSG_IFUP,
1048 "Trying to use less MSI-X vectors: %d\n", rc); 1012 "Trying to use less MSI-X vectors: %d\n", rc);
@@ -1054,12 +1018,17 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
1054 "MSI-X is not attainable rc %d\n", rc); 1018 "MSI-X is not attainable rc %d\n", rc);
1055 return rc; 1019 return rc;
1056 } 1020 }
1057 1021 /*
1058 bp->num_queues = min(bp->num_queues, fp_vec); 1022 * decrease number of queues by number of unallocated entries
1023 */
1024 bp->num_queues -= diff;
1059 1025
1060 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n", 1026 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1061 bp->num_queues); 1027 bp->num_queues);
1062 } else if (rc) { 1028 } else if (rc) {
1029 /* fall to INTx if not enough memory */
1030 if (rc == -ENOMEM)
1031 bp->flags |= DISABLE_MSI_FLAG;
1063 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 1032 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1064 return rc; 1033 return rc;
1065 } 1034 }
@@ -1088,7 +1057,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1088 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1057 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1089 bp->dev->name, i); 1058 bp->dev->name, i);
1090 1059
1091 rc = request_irq(bp->msix_table[i + offset].vector, 1060 rc = request_irq(bp->msix_table[offset].vector,
1092 bnx2x_msix_fp_int, 0, fp->name, fp); 1061 bnx2x_msix_fp_int, 0, fp->name, fp);
1093 if (rc) { 1062 if (rc) {
1094 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); 1063 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
@@ -1096,10 +1065,12 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1096 return -EBUSY; 1065 return -EBUSY;
1097 } 1066 }
1098 1067
1068 offset++;
1099 fp->state = BNX2X_FP_STATE_IRQ; 1069 fp->state = BNX2X_FP_STATE_IRQ;
1100 } 1070 }
1101 1071
1102 i = BNX2X_NUM_QUEUES(bp); 1072 i = BNX2X_NUM_QUEUES(bp);
1073 offset = 1 + CNIC_CONTEXT_USE;
1103 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" 1074 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1104 " ... fp[%d] %d\n", 1075 " ... fp[%d] %d\n",
1105 bp->msix_table[0].vector, 1076 bp->msix_table[0].vector,
@@ -1109,7 +1080,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1109 return 0; 1080 return 0;
1110} 1081}
1111 1082
1112static int bnx2x_enable_msi(struct bnx2x *bp) 1083int bnx2x_enable_msi(struct bnx2x *bp)
1113{ 1084{
1114 int rc; 1085 int rc;
1115 1086
@@ -1180,35 +1151,21 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1180 bnx2x_napi_disable(bp); 1151 bnx2x_napi_disable(bp);
1181 netif_tx_disable(bp->dev); 1152 netif_tx_disable(bp->dev);
1182} 1153}
1183static int bnx2x_set_num_queues(struct bnx2x *bp)
1184{
1185 int rc = 0;
1186 1154
1187 switch (bp->int_mode) { 1155void bnx2x_set_num_queues(struct bnx2x *bp)
1188 case INT_MODE_INTx: 1156{
1189 case INT_MODE_MSI: 1157 switch (bp->multi_mode) {
1158 case ETH_RSS_MODE_DISABLED:
1190 bp->num_queues = 1; 1159 bp->num_queues = 1;
1191 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1192 break; 1160 break;
1193 default: 1161 case ETH_RSS_MODE_REGULAR:
1194 /* Set number of queues according to bp->multi_mode value */ 1162 bp->num_queues = bnx2x_calc_num_queues(bp);
1195 bnx2x_set_num_queues_msix(bp); 1163 break;
1196
1197 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1198 bp->num_queues);
1199 1164
1200 /* if we can't use MSI-X we only need one fp, 1165 default:
1201 * so try to enable MSI-X with the requested number of fp's 1166 bp->num_queues = 1;
1202 * and fallback to MSI or legacy INTx with one fp
1203 */
1204 rc = bnx2x_enable_msix(bp);
1205 if (rc)
1206 /* failed to enable MSI-X */
1207 bp->num_queues = 1;
1208 break; 1167 break;
1209 } 1168 }
1210 bp->dev->real_num_tx_queues = bp->num_queues;
1211 return rc;
1212} 1169}
1213 1170
1214static void bnx2x_release_firmware(struct bnx2x *bp) 1171static void bnx2x_release_firmware(struct bnx2x *bp)
@@ -1239,48 +1196,25 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1239 1196
1240 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 1197 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1241 1198
1242 rc = bnx2x_set_num_queues(bp); 1199 /* must be called before memory allocation and HW init */
1200 bnx2x_ilt_set_info(bp);
1243 1201
1244 if (bnx2x_alloc_mem(bp)) { 1202 if (bnx2x_alloc_mem(bp))
1245 bnx2x_free_irq(bp, true);
1246 return -ENOMEM; 1203 return -ENOMEM;
1204
1205 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1206 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1207 if (rc) {
1208 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1209 goto load_error0;
1247 } 1210 }
1248 1211
1249 for_each_queue(bp, i) 1212 for_each_queue(bp, i)
1250 bnx2x_fp(bp, i, disable_tpa) = 1213 bnx2x_fp(bp, i, disable_tpa) =
1251 ((bp->flags & TPA_ENABLE_FLAG) == 0); 1214 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1252 1215
1253 for_each_queue(bp, i)
1254 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1255 bnx2x_poll, 128);
1256
1257 bnx2x_napi_enable(bp); 1216 bnx2x_napi_enable(bp);
1258 1217
1259 if (bp->flags & USING_MSIX_FLAG) {
1260 rc = bnx2x_req_msix_irqs(bp);
1261 if (rc) {
1262 bnx2x_free_irq(bp, true);
1263 goto load_error1;
1264 }
1265 } else {
1266 /* Fall to INTx if failed to enable MSI-X due to lack of
1267 memory (in bnx2x_set_num_queues()) */
1268 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1269 bnx2x_enable_msi(bp);
1270 bnx2x_ack_int(bp);
1271 rc = bnx2x_req_irq(bp);
1272 if (rc) {
1273 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1274 bnx2x_free_irq(bp, true);
1275 goto load_error1;
1276 }
1277 if (bp->flags & USING_MSI_FLAG) {
1278 bp->dev->irq = bp->pdev->irq;
1279 netdev_info(bp->dev, "using MSI IRQ %d\n",
1280 bp->pdev->irq);
1281 }
1282 }
1283
1284 /* Send LOAD_REQUEST command to MCP 1218 /* Send LOAD_REQUEST command to MCP
1285 Returns the type of LOAD command: 1219 Returns the type of LOAD command:
1286 if it is the first port to be initialized 1220 if it is the first port to be initialized
@@ -1291,31 +1225,35 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1291 if (!load_code) { 1225 if (!load_code) {
1292 BNX2X_ERR("MCP response failure, aborting\n"); 1226 BNX2X_ERR("MCP response failure, aborting\n");
1293 rc = -EBUSY; 1227 rc = -EBUSY;
1294 goto load_error2; 1228 goto load_error1;
1295 } 1229 }
1296 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { 1230 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1297 rc = -EBUSY; /* other port in diagnostic mode */ 1231 rc = -EBUSY; /* other port in diagnostic mode */
1298 goto load_error2; 1232 goto load_error1;
1299 } 1233 }
1300 1234
1301 } else { 1235 } else {
1236 int path = BP_PATH(bp);
1302 int port = BP_PORT(bp); 1237 int port = BP_PORT(bp);
1303 1238
1304 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n", 1239 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1305 load_count[0], load_count[1], load_count[2]); 1240 path, load_count[path][0], load_count[path][1],
1306 load_count[0]++; 1241 load_count[path][2]);
1307 load_count[1 + port]++; 1242 load_count[path][0]++;
1308 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n", 1243 load_count[path][1 + port]++;
1309 load_count[0], load_count[1], load_count[2]); 1244 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1310 if (load_count[0] == 1) 1245 path, load_count[path][0], load_count[path][1],
1246 load_count[path][2]);
1247 if (load_count[path][0] == 1)
1311 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 1248 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1312 else if (load_count[1 + port] == 1) 1249 else if (load_count[path][1 + port] == 1)
1313 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 1250 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1314 else 1251 else
1315 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 1252 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1316 } 1253 }
1317 1254
1318 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || 1255 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1256 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1319 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) 1257 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1320 bp->port.pmf = 1; 1258 bp->port.pmf = 1;
1321 else 1259 else
@@ -1327,15 +1265,21 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1327 if (rc) { 1265 if (rc) {
1328 BNX2X_ERR("HW init failed, aborting\n"); 1266 BNX2X_ERR("HW init failed, aborting\n");
1329 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); 1267 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1330 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); 1268 goto load_error2;
1331 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 1269 }
1270
1271 /* Connect to IRQs */
1272 rc = bnx2x_setup_irqs(bp);
1273 if (rc) {
1274 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1332 goto load_error2; 1275 goto load_error2;
1333 } 1276 }
1334 1277
1335 /* Setup NIC internals and enable interrupts */ 1278 /* Setup NIC internals and enable interrupts */
1336 bnx2x_nic_init(bp, load_code); 1279 bnx2x_nic_init(bp, load_code);
1337 1280
1338 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) && 1281 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1282 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1339 (bp->common.shmem2_base)) 1283 (bp->common.shmem2_base))
1340 SHMEM2_WR(bp, dcc_support, 1284 SHMEM2_WR(bp, dcc_support,
1341 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 1285 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
@@ -1353,7 +1297,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1353 1297
1354 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 1298 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1355 1299
1356 rc = bnx2x_setup_leading(bp); 1300 rc = bnx2x_func_start(bp);
1301 if (rc) {
1302 BNX2X_ERR("Function start failed!\n");
1303#ifndef BNX2X_STOP_ON_ERROR
1304 goto load_error3;
1305#else
1306 bp->panic = 1;
1307 return -EBUSY;
1308#endif
1309 }
1310
1311 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1357 if (rc) { 1312 if (rc) {
1358 BNX2X_ERR("Setup leading failed!\n"); 1313 BNX2X_ERR("Setup leading failed!\n");
1359#ifndef BNX2X_STOP_ON_ERROR 1314#ifndef BNX2X_STOP_ON_ERROR
@@ -1364,62 +1319,47 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1364#endif 1319#endif
1365 } 1320 }
1366 1321
1367 if (CHIP_IS_E1H(bp)) 1322 if (!CHIP_IS_E1(bp) &&
1368 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 1323 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1369 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 1324 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1370 bp->flags |= MF_FUNC_DIS; 1325 bp->flags |= MF_FUNC_DIS;
1371 } 1326 }
1372 1327
1373 if (bp->state == BNX2X_STATE_OPEN) {
1374#ifdef BCM_CNIC
1375 /* Enable Timer scan */
1376 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1377#endif
1378 for_each_nondefault_queue(bp, i) {
1379 rc = bnx2x_setup_multi(bp, i);
1380 if (rc)
1381#ifdef BCM_CNIC 1328#ifdef BCM_CNIC
1382 goto load_error4; 1329 /* Enable Timer scan */
1383#else 1330 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1384 goto load_error3;
1385#endif 1331#endif
1386 }
1387 1332
1388 if (CHIP_IS_E1(bp)) 1333 for_each_nondefault_queue(bp, i) {
1389 bnx2x_set_eth_mac_addr_e1(bp, 1); 1334 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1390 else 1335 if (rc)
1391 bnx2x_set_eth_mac_addr_e1h(bp, 1);
1392#ifdef BCM_CNIC 1336#ifdef BCM_CNIC
1393 /* Set iSCSI L2 MAC */ 1337 goto load_error4;
1394 mutex_lock(&bp->cnic_mutex); 1338#else
1395 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { 1339 goto load_error3;
1396 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1397 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1398 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
1399 CNIC_SB_ID(bp));
1400 }
1401 mutex_unlock(&bp->cnic_mutex);
1402#endif 1340#endif
1403 } 1341 }
1404 1342
1343 /* Now when Clients are configured we are ready to work */
1344 bp->state = BNX2X_STATE_OPEN;
1345
1346 bnx2x_set_eth_mac(bp, 1);
1347
1405 if (bp->port.pmf) 1348 if (bp->port.pmf)
1406 bnx2x_initial_phy_init(bp, load_mode); 1349 bnx2x_initial_phy_init(bp, load_mode);
1407 1350
1408 /* Start fast path */ 1351 /* Start fast path */
1409 switch (load_mode) { 1352 switch (load_mode) {
1410 case LOAD_NORMAL: 1353 case LOAD_NORMAL:
1411 if (bp->state == BNX2X_STATE_OPEN) { 1354 /* Tx queue should be only reenabled */
1412 /* Tx queue should be only reenabled */ 1355 netif_tx_wake_all_queues(bp->dev);
1413 netif_tx_wake_all_queues(bp->dev);
1414 }
1415 /* Initialize the receive filter. */ 1356 /* Initialize the receive filter. */
1416 bnx2x_set_rx_mode(bp->dev); 1357 bnx2x_set_rx_mode(bp->dev);
1417 break; 1358 break;
1418 1359
1419 case LOAD_OPEN: 1360 case LOAD_OPEN:
1420 netif_tx_start_all_queues(bp->dev); 1361 netif_tx_start_all_queues(bp->dev);
1421 if (bp->state != BNX2X_STATE_OPEN) 1362 smp_mb__after_clear_bit();
1422 netif_tx_disable(bp->dev);
1423 /* Initialize the receive filter. */ 1363 /* Initialize the receive filter. */
1424 bnx2x_set_rx_mode(bp->dev); 1364 bnx2x_set_rx_mode(bp->dev);
1425 break; 1365 break;
@@ -1458,22 +1398,24 @@ load_error4:
1458#endif 1398#endif
1459load_error3: 1399load_error3:
1460 bnx2x_int_disable_sync(bp, 1); 1400 bnx2x_int_disable_sync(bp, 1);
1461 if (!BP_NOMCP(bp)) { 1401
1462 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1463 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1464 }
1465 bp->port.pmf = 0;
1466 /* Free SKBs, SGEs, TPA pool and driver internals */ 1402 /* Free SKBs, SGEs, TPA pool and driver internals */
1467 bnx2x_free_skbs(bp); 1403 bnx2x_free_skbs(bp);
1468 for_each_queue(bp, i) 1404 for_each_queue(bp, i)
1469 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1405 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1470load_error2: 1406
1471 /* Release IRQs */ 1407 /* Release IRQs */
1472 bnx2x_free_irq(bp, false); 1408 bnx2x_free_irq(bp);
1409load_error2:
1410 if (!BP_NOMCP(bp)) {
1411 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1412 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1413 }
1414
1415 bp->port.pmf = 0;
1473load_error1: 1416load_error1:
1474 bnx2x_napi_disable(bp); 1417 bnx2x_napi_disable(bp);
1475 for_each_queue(bp, i) 1418load_error0:
1476 netif_napi_del(&bnx2x_fp(bp, i, napi));
1477 bnx2x_free_mem(bp); 1419 bnx2x_free_mem(bp);
1478 1420
1479 bnx2x_release_firmware(bp); 1421 bnx2x_release_firmware(bp);
@@ -1505,21 +1447,26 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1505 bp->rx_mode = BNX2X_RX_MODE_NONE; 1447 bp->rx_mode = BNX2X_RX_MODE_NONE;
1506 bnx2x_set_storm_rx_mode(bp); 1448 bnx2x_set_storm_rx_mode(bp);
1507 1449
1508 /* Disable HW interrupts, NAPI and Tx */ 1450 /* Stop Tx */
1509 bnx2x_netif_stop(bp, 1); 1451 bnx2x_tx_disable(bp);
1510 netif_carrier_off(bp->dev);
1511 1452
1512 del_timer_sync(&bp->timer); 1453 del_timer_sync(&bp->timer);
1513 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 1454
1455 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1514 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 1456 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1515 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1516 1457
1517 /* Release IRQs */ 1458 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1518 bnx2x_free_irq(bp, false);
1519 1459
1520 /* Cleanup the chip if needed */ 1460 /* Cleanup the chip if needed */
1521 if (unload_mode != UNLOAD_RECOVERY) 1461 if (unload_mode != UNLOAD_RECOVERY)
1522 bnx2x_chip_cleanup(bp, unload_mode); 1462 bnx2x_chip_cleanup(bp, unload_mode);
1463 else {
1464 /* Disable HW interrupts, NAPI and Tx */
1465 bnx2x_netif_stop(bp, 1);
1466
1467 /* Release IRQs */
1468 bnx2x_free_irq(bp);
1469 }
1523 1470
1524 bp->port.pmf = 0; 1471 bp->port.pmf = 0;
1525 1472
@@ -1527,8 +1474,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1527 bnx2x_free_skbs(bp); 1474 bnx2x_free_skbs(bp);
1528 for_each_queue(bp, i) 1475 for_each_queue(bp, i)
1529 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1530 for_each_queue(bp, i) 1477
1531 netif_napi_del(&bnx2x_fp(bp, i, napi));
1532 bnx2x_free_mem(bp); 1478 bnx2x_free_mem(bp);
1533 1479
1534 bp->state = BNX2X_STATE_CLOSED; 1480 bp->state = BNX2X_STATE_CLOSED;
@@ -1546,10 +1492,17 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1546 1492
1547 return 0; 1493 return 0;
1548} 1494}
1495
1549int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) 1496int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1550{ 1497{
1551 u16 pmcsr; 1498 u16 pmcsr;
1552 1499
1500 /* If there is no power capability, silently succeed */
1501 if (!bp->pm_cap) {
1502 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1503 return 0;
1504 }
1505
1553 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); 1506 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1554 1507
1555 switch (state) { 1508 switch (state) {
@@ -1592,13 +1545,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1592 return 0; 1545 return 0;
1593} 1546}
1594 1547
1595
1596
1597/* 1548/*
1598 * net_device service functions 1549 * net_device service functions
1599 */ 1550 */
1600 1551int bnx2x_poll(struct napi_struct *napi, int budget)
1601static int bnx2x_poll(struct napi_struct *napi, int budget)
1602{ 1552{
1603 int work_done = 0; 1553 int work_done = 0;
1604 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 1554 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
@@ -1627,27 +1577,28 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
1627 /* Fall out from the NAPI loop if needed */ 1577 /* Fall out from the NAPI loop if needed */
1628 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1578 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1629 bnx2x_update_fpsb_idx(fp); 1579 bnx2x_update_fpsb_idx(fp);
1630 /* bnx2x_has_rx_work() reads the status block, thus we need 1580 /* bnx2x_has_rx_work() reads the status block,
1631 * to ensure that status block indices have been actually read 1581 * thus we need to ensure that status block indices
1632 * (bnx2x_update_fpsb_idx) prior to this check 1582 * have been actually read (bnx2x_update_fpsb_idx)
1633 * (bnx2x_has_rx_work) so that we won't write the "newer" 1583 * prior to this check (bnx2x_has_rx_work) so that
1634 * value of the status block to IGU (if there was a DMA right 1584 * we won't write the "newer" value of the status block
1635 * after bnx2x_has_rx_work and if there is no rmb, the memory 1585 * to IGU (if there was a DMA right after
1636 * reading (bnx2x_update_fpsb_idx) may be postponed to right 1586 * bnx2x_has_rx_work and if there is no rmb, the memory
1637 * before bnx2x_ack_sb). In this case there will never be 1587 * reading (bnx2x_update_fpsb_idx) may be postponed
1638 * another interrupt until there is another update of the 1588 * to right before bnx2x_ack_sb). In this case there
1639 * status block, while there is still unhandled work. 1589 * will never be another interrupt until there is
1640 */ 1590 * another update of the status block, while there
1591 * is still unhandled work.
1592 */
1641 rmb(); 1593 rmb();
1642 1594
1643 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1595 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1644 napi_complete(napi); 1596 napi_complete(napi);
1645 /* Re-enable interrupts */ 1597 /* Re-enable interrupts */
1646 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 1598 DP(NETIF_MSG_HW,
1647 le16_to_cpu(fp->fp_c_idx), 1599 "Update index to %d\n", fp->fp_hc_idx);
1648 IGU_INT_NOP, 1); 1600 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1649 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 1601 le16_to_cpu(fp->fp_hc_idx),
1650 le16_to_cpu(fp->fp_u_idx),
1651 IGU_INT_ENABLE, 1); 1602 IGU_INT_ENABLE, 1);
1652 break; 1603 break;
1653 } 1604 }
@@ -1657,7 +1608,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
1657 return work_done; 1608 return work_done;
1658} 1609}
1659 1610
1660
1661/* we split the first BD into headers and data BDs 1611/* we split the first BD into headers and data BDs
1662 * to ease the pain of our fellow microcode engineers 1612 * to ease the pain of our fellow microcode engineers
1663 * we use one mapping for both BDs 1613 * we use one mapping for both BDs
@@ -1831,6 +1781,122 @@ exit_lbl:
1831} 1781}
1832#endif 1782#endif
1833 1783
1784static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1785 struct eth_tx_parse_bd_e2 *pbd,
1786 u32 xmit_type)
1787{
1788 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1789 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1790 if ((xmit_type & XMIT_GSO_V6) &&
1791 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1792 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1793}
1794
1795/**
1796 * Update PBD in GSO case.
1797 *
1798 * @param skb
1799 * @param tx_start_bd
1800 * @param pbd
1801 * @param xmit_type
1802 */
1803static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1804 struct eth_tx_parse_bd_e1x *pbd,
1805 u32 xmit_type)
1806{
1807 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1808 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1809 pbd->tcp_flags = pbd_tcp_flags(skb);
1810
1811 if (xmit_type & XMIT_GSO_V4) {
1812 pbd->ip_id = swab16(ip_hdr(skb)->id);
1813 pbd->tcp_pseudo_csum =
1814 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1815 ip_hdr(skb)->daddr,
1816 0, IPPROTO_TCP, 0));
1817
1818 } else
1819 pbd->tcp_pseudo_csum =
1820 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1821 &ipv6_hdr(skb)->daddr,
1822 0, IPPROTO_TCP, 0));
1823
1824 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1825}
1826
1827/**
1828 *
1829 * @param skb
1830 * @param tx_start_bd
1831 * @param pbd_e2
1832 * @param xmit_type
1833 *
1834 * @return header len
1835 */
1836static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1837 struct eth_tx_parse_bd_e2 *pbd,
1838 u32 xmit_type)
1839{
1840 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1841 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1842
1843 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1844 skb->data) / 2) <<
1845 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1846
1847 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1848}
1849
1850/**
1851 *
1852 * @param skb
1853 * @param tx_start_bd
1854 * @param pbd
1855 * @param xmit_type
1856 *
1857 * @return Header length
1858 */
1859static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1860 struct eth_tx_parse_bd_e1x *pbd,
1861 u32 xmit_type)
1862{
1863 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1864
1865 /* for now NS flag is not used in Linux */
1866 pbd->global_data =
1867 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1868 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1869
1870 pbd->ip_hlen_w = (skb_transport_header(skb) -
1871 skb_network_header(skb)) / 2;
1872
1873 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1874
1875 pbd->total_hlen_w = cpu_to_le16(hlen);
1876 hlen = hlen*2;
1877
1878 if (xmit_type & XMIT_CSUM_TCP) {
1879 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1880
1881 } else {
1882 s8 fix = SKB_CS_OFF(skb); /* signed! */
1883
1884 DP(NETIF_MSG_TX_QUEUED,
1885 "hlen %d fix %d csum before fix %x\n",
1886 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1887
1888 /* HW bug: fixup the CSUM */
1889 pbd->tcp_pseudo_csum =
1890 bnx2x_csum_fix(skb_transport_header(skb),
1891 SKB_CS(skb), fix);
1892
1893 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1894 pbd->tcp_pseudo_csum);
1895 }
1896
1897 return hlen;
1898}
1899
1834/* called with netif_tx_lock 1900/* called with netif_tx_lock
1835 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 1901 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1836 * netif_wake_queue() 1902 * netif_wake_queue()
@@ -1843,7 +1909,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1843 struct sw_tx_bd *tx_buf; 1909 struct sw_tx_bd *tx_buf;
1844 struct eth_tx_start_bd *tx_start_bd; 1910 struct eth_tx_start_bd *tx_start_bd;
1845 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; 1911 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1846 struct eth_tx_parse_bd *pbd = NULL; 1912 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1913 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1847 u16 pkt_prod, bd_prod; 1914 u16 pkt_prod, bd_prod;
1848 int nbd, fp_index; 1915 int nbd, fp_index;
1849 dma_addr_t mapping; 1916 dma_addr_t mapping;
@@ -1871,9 +1938,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1871 return NETDEV_TX_BUSY; 1938 return NETDEV_TX_BUSY;
1872 } 1939 }
1873 1940
1874 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)" 1941 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1875 " gso type %x xmit_type %x\n", 1942 "protocol(%x,%x) gso type %x xmit_type %x\n",
1876 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 1943 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
1877 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 1944 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1878 1945
1879 eth = (struct ethhdr *)skb->data; 1946 eth = (struct ethhdr *)skb->data;
@@ -1919,10 +1986,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1919 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 1986 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1920 1987
1921 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 1988 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1922 tx_start_bd->general_data = (mac_type << 1989 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
1923 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 1990 mac_type);
1991
1924 /* header nbd */ 1992 /* header nbd */
1925 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 1993 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
1926 1994
1927 /* remember the first BD of the packet */ 1995 /* remember the first BD of the packet */
1928 tx_buf->first_bd = fp->tx_bd_prod; 1996 tx_buf->first_bd = fp->tx_bd_prod;
@@ -1933,37 +2001,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1933 "sending pkt %u @%p next_idx %u bd %u @%p\n", 2001 "sending pkt %u @%p next_idx %u bd %u @%p\n",
1934 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); 2002 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
1935 2003
1936#ifdef BCM_VLAN 2004 if (vlan_tx_tag_present(skb)) {
1937 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) && 2005 tx_start_bd->vlan_or_ethertype =
1938 (bp->flags & HW_VLAN_TX_FLAG)) { 2006 cpu_to_le16(vlan_tx_tag_get(skb));
1939 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2007 tx_start_bd->bd_flags.as_bitfield |=
1940 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; 2008 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
1941 } else 2009 } else
1942#endif 2010 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1943 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
1944 2011
1945 /* turn on parsing and get a BD */ 2012 /* turn on parsing and get a BD */
1946 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 2013 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1947 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
1948
1949 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
1950 2014
1951 if (xmit_type & XMIT_CSUM) { 2015 if (xmit_type & XMIT_CSUM) {
1952 hlen = (skb_network_header(skb) - skb->data) / 2;
1953
1954 /* for now NS flag is not used in Linux */
1955 pbd->global_data =
1956 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1957 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
1958
1959 pbd->ip_hlen = (skb_transport_header(skb) -
1960 skb_network_header(skb)) / 2;
1961
1962 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
1963
1964 pbd->total_hlen = cpu_to_le16(hlen);
1965 hlen = hlen*2;
1966
1967 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; 2016 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
1968 2017
1969 if (xmit_type & XMIT_CSUM_V4) 2018 if (xmit_type & XMIT_CSUM_V4)
@@ -1973,31 +2022,32 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1973 tx_start_bd->bd_flags.as_bitfield |= 2022 tx_start_bd->bd_flags.as_bitfield |=
1974 ETH_TX_BD_FLAGS_IPV6; 2023 ETH_TX_BD_FLAGS_IPV6;
1975 2024
1976 if (xmit_type & XMIT_CSUM_TCP) { 2025 if (!(xmit_type & XMIT_CSUM_TCP))
1977 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check); 2026 tx_start_bd->bd_flags.as_bitfield |=
1978 2027 ETH_TX_BD_FLAGS_IS_UDP;
1979 } else { 2028 }
1980 s8 fix = SKB_CS_OFF(skb); /* signed! */
1981
1982 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
1983
1984 DP(NETIF_MSG_TX_QUEUED,
1985 "hlen %d fix %d csum before fix %x\n",
1986 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
1987 2029
1988 /* HW bug: fixup the CSUM */ 2030 if (CHIP_IS_E2(bp)) {
1989 pbd->tcp_pseudo_csum = 2031 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
1990 bnx2x_csum_fix(skb_transport_header(skb), 2032 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1991 SKB_CS(skb), fix); 2033 /* Set PBD in checksum offload case */
2034 if (xmit_type & XMIT_CSUM)
2035 hlen = bnx2x_set_pbd_csum_e2(bp,
2036 skb, pbd_e2, xmit_type);
2037 } else {
2038 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2039 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2040 /* Set PBD in checksum offload case */
2041 if (xmit_type & XMIT_CSUM)
2042 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
1992 2043
1993 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1994 pbd->tcp_pseudo_csum);
1995 }
1996 } 2044 }
1997 2045
2046 /* Map skb linear data for DMA */
1998 mapping = dma_map_single(&bp->pdev->dev, skb->data, 2047 mapping = dma_map_single(&bp->pdev->dev, skb->data,
1999 skb_headlen(skb), DMA_TO_DEVICE); 2048 skb_headlen(skb), DMA_TO_DEVICE);
2000 2049
2050 /* Setup the data pointer of the first BD of the packet */
2001 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2051 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2002 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2052 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2003 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ 2053 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
@@ -2009,7 +2059,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2009 " nbytes %d flags %x vlan %x\n", 2059 " nbytes %d flags %x vlan %x\n",
2010 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo, 2060 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2011 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes), 2061 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2012 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan)); 2062 tx_start_bd->bd_flags.as_bitfield,
2063 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2013 2064
2014 if (xmit_type & XMIT_GSO) { 2065 if (xmit_type & XMIT_GSO) {
2015 2066
@@ -2023,28 +2074,14 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2023 if (unlikely(skb_headlen(skb) > hlen)) 2074 if (unlikely(skb_headlen(skb) > hlen))
2024 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, 2075 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2025 hlen, bd_prod, ++nbd); 2076 hlen, bd_prod, ++nbd);
2026 2077 if (CHIP_IS_E2(bp))
2027 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2078 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2028 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq); 2079 else
2029 pbd->tcp_flags = pbd_tcp_flags(skb); 2080 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2030
2031 if (xmit_type & XMIT_GSO_V4) {
2032 pbd->ip_id = swab16(ip_hdr(skb)->id);
2033 pbd->tcp_pseudo_csum =
2034 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2035 ip_hdr(skb)->daddr,
2036 0, IPPROTO_TCP, 0));
2037
2038 } else
2039 pbd->tcp_pseudo_csum =
2040 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2041 &ipv6_hdr(skb)->daddr,
2042 0, IPPROTO_TCP, 0));
2043
2044 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
2045 } 2081 }
2046 tx_data_bd = (struct eth_tx_bd *)tx_start_bd; 2082 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2047 2083
2084 /* Handle fragmented skb */
2048 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2085 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2049 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2086 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2050 2087
@@ -2081,14 +2118,21 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2081 if (total_pkt_bd != NULL) 2118 if (total_pkt_bd != NULL)
2082 total_pkt_bd->total_pkt_bytes = pkt_size; 2119 total_pkt_bd->total_pkt_bytes = pkt_size;
2083 2120
2084 if (pbd) 2121 if (pbd_e1x)
2085 DP(NETIF_MSG_TX_QUEUED, 2122 DP(NETIF_MSG_TX_QUEUED,
2086 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u" 2123 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2087 " tcp_flags %x xsum %x seq %u hlen %u\n", 2124 " tcp_flags %x xsum %x seq %u hlen %u\n",
2088 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id, 2125 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2089 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum, 2126 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2090 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen)); 2127 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2091 2128 le16_to_cpu(pbd_e1x->total_hlen_w));
2129 if (pbd_e2)
2130 DP(NETIF_MSG_TX_QUEUED,
2131 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2132 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2133 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2134 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2135 pbd_e2->parsing_data);
2092 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 2136 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2093 2137
2094 /* 2138 /*
@@ -2102,7 +2146,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2102 2146
2103 fp->tx_db.data.prod += nbd; 2147 fp->tx_db.data.prod += nbd;
2104 barrier(); 2148 barrier();
2105 DOORBELL(bp, fp->index, fp->tx_db.raw); 2149
2150 DOORBELL(bp, fp->cid, fp->tx_db.raw);
2106 2151
2107 mmiowb(); 2152 mmiowb();
2108 2153
@@ -2124,6 +2169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2124 2169
2125 return NETDEV_TX_OK; 2170 return NETDEV_TX_OK;
2126} 2171}
2172
2127/* called with rtnl_lock */ 2173/* called with rtnl_lock */
2128int bnx2x_change_mac_addr(struct net_device *dev, void *p) 2174int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2129{ 2175{
@@ -2134,16 +2180,76 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2134 return -EINVAL; 2180 return -EINVAL;
2135 2181
2136 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2182 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2137 if (netif_running(dev)) { 2183 if (netif_running(dev))
2138 if (CHIP_IS_E1(bp)) 2184 bnx2x_set_eth_mac(bp, 1);
2139 bnx2x_set_eth_mac_addr_e1(bp, 1); 2185
2140 else 2186 return 0;
2141 bnx2x_set_eth_mac_addr_e1h(bp, 1); 2187}
2188
2189
2190int bnx2x_setup_irqs(struct bnx2x *bp)
2191{
2192 int rc = 0;
2193 if (bp->flags & USING_MSIX_FLAG) {
2194 rc = bnx2x_req_msix_irqs(bp);
2195 if (rc)
2196 return rc;
2197 } else {
2198 bnx2x_ack_int(bp);
2199 rc = bnx2x_req_irq(bp);
2200 if (rc) {
2201 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2202 return rc;
2203 }
2204 if (bp->flags & USING_MSI_FLAG) {
2205 bp->dev->irq = bp->pdev->irq;
2206 netdev_info(bp->dev, "using MSI IRQ %d\n",
2207 bp->pdev->irq);
2208 }
2142 } 2209 }
2143 2210
2144 return 0; 2211 return 0;
2145} 2212}
2146 2213
2214void bnx2x_free_mem_bp(struct bnx2x *bp)
2215{
2216 kfree(bp->fp);
2217 kfree(bp->msix_table);
2218 kfree(bp->ilt);
2219}
2220
2221int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2222{
2223 struct bnx2x_fastpath *fp;
2224 struct msix_entry *tbl;
2225 struct bnx2x_ilt *ilt;
2226
2227 /* fp array */
2228 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2229 if (!fp)
2230 goto alloc_err;
2231 bp->fp = fp;
2232
2233 /* msix table */
2234 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2235 GFP_KERNEL);
2236 if (!tbl)
2237 goto alloc_err;
2238 bp->msix_table = tbl;
2239
2240 /* ilt */
2241 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2242 if (!ilt)
2243 goto alloc_err;
2244 bp->ilt = ilt;
2245
2246 return 0;
2247alloc_err:
2248 bnx2x_free_mem_bp(bp);
2249 return -ENOMEM;
2250
2251}
2252
2147/* called with rtnl_lock */ 2253/* called with rtnl_lock */
2148int bnx2x_change_mtu(struct net_device *dev, int new_mtu) 2254int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2149{ 2255{
@@ -2185,29 +2291,6 @@ void bnx2x_tx_timeout(struct net_device *dev)
2185 schedule_delayed_work(&bp->reset_task, 0); 2291 schedule_delayed_work(&bp->reset_task, 0);
2186} 2292}
2187 2293
2188#ifdef BCM_VLAN
2189/* called with rtnl_lock */
2190void bnx2x_vlan_rx_register(struct net_device *dev,
2191 struct vlan_group *vlgrp)
2192{
2193 struct bnx2x *bp = netdev_priv(dev);
2194
2195 bp->vlgrp = vlgrp;
2196
2197 /* Set flags according to the required capabilities */
2198 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
2199
2200 if (dev->features & NETIF_F_HW_VLAN_TX)
2201 bp->flags |= HW_VLAN_TX_FLAG;
2202
2203 if (dev->features & NETIF_F_HW_VLAN_RX)
2204 bp->flags |= HW_VLAN_RX_FLAG;
2205
2206 if (netif_running(dev))
2207 bnx2x_set_client_config(bp);
2208}
2209
2210#endif
2211int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 2294int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2212{ 2295{
2213 struct net_device *dev = pci_get_drvdata(pdev); 2296 struct net_device *dev = pci_get_drvdata(pdev);
@@ -2268,6 +2351,8 @@ int bnx2x_resume(struct pci_dev *pdev)
2268 bnx2x_set_power_state(bp, PCI_D0); 2351 bnx2x_set_power_state(bp, PCI_D0);
2269 netif_device_attach(dev); 2352 netif_device_attach(dev);
2270 2353
2354 /* Since the chip was reset, clear the FW sequence number */
2355 bp->fw_seq = 0;
2271 rc = bnx2x_nic_load(bp, LOAD_OPEN); 2356 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2272 2357
2273 rtnl_unlock(); 2358 rtnl_unlock();
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index d1e6a8c977d1..5bfe0ab1d2d4 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -23,6 +23,7 @@
23 23
24#include "bnx2x.h" 24#include "bnx2x.h"
25 25
26extern int num_queues;
26 27
27/*********************** Interfaces **************************** 28/*********************** Interfaces ****************************
28 * Functions that need to be implemented by each driver version 29 * Functions that need to be implemented by each driver version
@@ -63,6 +64,15 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
63void bnx2x__link_status_update(struct bnx2x *bp); 64void bnx2x__link_status_update(struct bnx2x *bp);
64 65
65/** 66/**
67 * Report link status to upper layer
68 *
69 * @param bp
70 *
71 * @return int
72 */
73void bnx2x_link_report(struct bnx2x *bp);
74
75/**
66 * MSI-X slowpath interrupt handler 76 * MSI-X slowpath interrupt handler
67 * 77 *
68 * @param irq 78 * @param irq
@@ -107,6 +117,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
107void bnx2x_int_enable(struct bnx2x *bp); 117void bnx2x_int_enable(struct bnx2x *bp);
108 118
109/** 119/**
120 * Disable HW interrupts.
121 *
122 * @param bp
123 */
124void bnx2x_int_disable(struct bnx2x *bp);
125
126/**
110 * Disable interrupts. This function ensures that there are no 127 * Disable interrupts. This function ensures that there are no
111 * ISRs or SP DPCs (sp_task) are running after it returns. 128 * ISRs or SP DPCs (sp_task) are running after it returns.
112 * 129 *
@@ -163,32 +180,35 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
163void bnx2x_free_mem(struct bnx2x *bp); 180void bnx2x_free_mem(struct bnx2x *bp);
164 181
165/** 182/**
166 * Bring up a leading (the first) eth Client. 183 * Setup eth Client.
167 * 184 *
168 * @param bp 185 * @param bp
186 * @param fp
187 * @param is_leading
169 * 188 *
170 * @return int 189 * @return int
171 */ 190 */
172int bnx2x_setup_leading(struct bnx2x *bp); 191int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
192 int is_leading);
173 193
174/** 194/**
175 * Setup non-leading eth Client. 195 * Bring down an eth client.
176 * 196 *
177 * @param bp 197 * @param bp
178 * @param fp 198 * @param p
179 * 199 *
180 * @return int 200 * @return int
181 */ 201 */
182int bnx2x_setup_multi(struct bnx2x *bp, int index); 202int bnx2x_stop_fw_client(struct bnx2x *bp,
203 struct bnx2x_client_ramrod_params *p);
183 204
184/** 205/**
185 * Set number of quueus according to mode and number of available 206 * Set number of queues according to mode
186 * msi-x vectors
187 * 207 *
188 * @param bp 208 * @param bp
189 * 209 *
190 */ 210 */
191void bnx2x_set_num_queues_msix(struct bnx2x *bp); 211void bnx2x_set_num_queues(struct bnx2x *bp);
192 212
193/** 213/**
194 * Cleanup chip internals: 214 * Cleanup chip internals:
@@ -223,21 +243,12 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
223 243
224/** 244/**
225 * Configure eth MAC address in the HW according to the value in 245 * Configure eth MAC address in the HW according to the value in
226 * netdev->dev_addr for 57711 246 * netdev->dev_addr.
227 *
228 * @param bp driver handle
229 * @param set
230 */
231void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
232
233/**
234 * Configure eth MAC address in the HW according to the value in
235 * netdev->dev_addr for 57710
236 * 247 *
237 * @param bp driver handle 248 * @param bp driver handle
238 * @param set 249 * @param set
239 */ 250 */
240void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set); 251void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
241 252
242#ifdef BCM_CNIC 253#ifdef BCM_CNIC
243/** 254/**
@@ -257,18 +268,22 @@ int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
257 * Initialize status block in FW and HW 268 * Initialize status block in FW and HW
258 * 269 *
259 * @param bp driver handle 270 * @param bp driver handle
260 * @param sb host_status_block
261 * @param dma_addr_t mapping 271 * @param dma_addr_t mapping
262 * @param int sb_id 272 * @param int sb_id
273 * @param int vfid
274 * @param u8 vf_valid
275 * @param int fw_sb_id
276 * @param int igu_sb_id
263 */ 277 */
264void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 278void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
265 dma_addr_t mapping, int sb_id); 279 u8 vf_valid, int fw_sb_id, int igu_sb_id);
266 280
267/** 281/**
268 * Reconfigure FW/HW according to dev->flags rx mode 282 * Set MAC filtering configurations.
269 * 283 *
270 * @param dev net_device 284 * @remarks called with netif_tx_lock from dev_mcast.c
271 * 285 *
286 * @param dev net_device
272 */ 287 */
273void bnx2x_set_rx_mode(struct net_device *dev); 288void bnx2x_set_rx_mode(struct net_device *dev);
274 289
@@ -290,34 +305,162 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
290 * Perform statistics handling according to event 305 * Perform statistics handling according to event
291 * 306 *
292 * @param bp driver handle 307 * @param bp driver handle
293 * @param even tbnx2x_stats_event 308 * @param event bnx2x_stats_event
294 */ 309 */
295void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 310void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
296 311
297/** 312/**
298 * Configures FW with client paramteres (like HW VLAN removal) 313 * Handle ramrods completion
299 * for each active client. 314 *
315 * @param fp fastpath handle for the event
316 * @param rr_cqe eth_rx_cqe
317 */
318void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
319
320/**
321 * Init/halt function before/after sending
322 * CLIENT_SETUP/CFC_DEL for the first/last client.
300 * 323 *
301 * @param bp 324 * @param bp
325 *
326 * @return int
302 */ 327 */
303void bnx2x_set_client_config(struct bnx2x *bp); 328int bnx2x_func_start(struct bnx2x *bp);
329int bnx2x_func_stop(struct bnx2x *bp);
304 330
305/** 331/**
306 * Handle sp events 332 * Prepare ILT configurations according to current driver
333 * parameters.
307 * 334 *
308 * @param fp fastpath handle for the event 335 * @param bp
309 * @param rr_cqe eth_rx_cqe
310 */ 336 */
311void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe); 337void bnx2x_ilt_set_info(struct bnx2x *bp);
312 338
339/**
340 * Set power state to the requested value. Currently only D0 and
341 * D3hot are supported.
342 *
343 * @param bp
344 * @param state D0 or D3hot
345 *
346 * @return int
347 */
348int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
349
350/* dev_close main block */
351int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
352
353/* dev_open main block */
354int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
355
356/* hard_xmit callback */
357netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
358
359int bnx2x_change_mac_addr(struct net_device *dev, void *p);
360
361/* NAPI poll Rx part */
362int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
363
364/* NAPI poll Tx part */
365int bnx2x_tx_int(struct bnx2x_fastpath *fp);
366
367/* suspend/resume callbacks */
368int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
369int bnx2x_resume(struct pci_dev *pdev);
370
371/* Release IRQ vectors */
372void bnx2x_free_irq(struct bnx2x *bp);
373
374void bnx2x_init_rx_rings(struct bnx2x *bp);
375void bnx2x_free_skbs(struct bnx2x *bp);
376void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
377void bnx2x_netif_start(struct bnx2x *bp);
378
379/**
380 * Fill msix_table, request vectors, update num_queues according
381 * to number of available vectors
382 *
383 * @param bp
384 *
385 * @return int
386 */
387int bnx2x_enable_msix(struct bnx2x *bp);
388
389/**
390 * Request msi mode from OS, updated internals accordingly
391 *
392 * @param bp
393 *
394 * @return int
395 */
396int bnx2x_enable_msi(struct bnx2x *bp);
397
398/**
399 * Request IRQ vectors from OS.
400 *
401 * @param bp
402 *
403 * @return int
404 */
405int bnx2x_setup_irqs(struct bnx2x *bp);
406/**
407 * NAPI callback
408 *
409 * @param napi
410 * @param budget
411 *
412 * @return int
413 */
414int bnx2x_poll(struct napi_struct *napi, int budget);
415
416/**
417 * Allocate/release memories outsize main driver structure
418 *
419 * @param bp
420 *
421 * @return int
422 */
423int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
424void bnx2x_free_mem_bp(struct bnx2x *bp);
425
426/**
427 * Change mtu netdev callback
428 *
429 * @param dev
430 * @param new_mtu
431 *
432 * @return int
433 */
434int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
435
436/**
437 * tx timeout netdev callback
438 *
439 * @param dev
440 * @param new_mtu
441 *
442 * @return int
443 */
444void bnx2x_tx_timeout(struct net_device *dev);
445
446#ifdef BCM_VLAN
447/**
448 * vlan rx register netdev callback
449 *
450 * @param dev
451 * @param new_mtu
452 *
453 * @return int
454 */
455void bnx2x_vlan_rx_register(struct net_device *dev,
456 struct vlan_group *vlgrp);
457
458#endif
313 459
314static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 460static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
315{ 461{
316 struct host_status_block *fpsb = fp->status_blk;
317
318 barrier(); /* status block is written to by the chip */ 462 barrier(); /* status block is written to by the chip */
319 fp->fp_c_idx = fpsb->c_status_block.status_block_index; 463 fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
320 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
321} 464}
322 465
323static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 466static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
@@ -344,8 +487,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
344 wmb(); 487 wmb();
345 488
346 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++) 489 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
347 REG_WR(bp, BAR_USTRORM_INTMEM + 490 REG_WR(bp,
348 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4, 491 BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
349 ((u32 *)&rx_prods)[i]); 492 ((u32 *)&rx_prods)[i]);
350 493
351 mmiowb(); /* keep prod updates ordered */ 494 mmiowb(); /* keep prod updates ordered */
@@ -355,10 +498,77 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
355 fp->index, bd_prod, rx_comp_prod, rx_sge_prod); 498 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
356} 499}
357 500
501static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
502 u8 segment, u16 index, u8 op,
503 u8 update, u32 igu_addr)
504{
505 struct igu_regular cmd_data = {0};
506
507 cmd_data.sb_id_and_flags =
508 ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
509 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
510 (update << IGU_REGULAR_BUPDATE_SHIFT) |
511 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
358 512
513 DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
514 cmd_data.sb_id_and_flags, igu_addr);
515 REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
516
517 /* Make sure that ACK is written */
518 mmiowb();
519 barrier();
520}
359 521
360static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 522static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
361 u8 storm, u16 index, u8 op, u8 update) 523 u8 idu_sb_id, bool is_Pf)
524{
525 u32 data, ctl, cnt = 100;
526 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
527 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
528 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
529 u32 sb_bit = 1 << (idu_sb_id%32);
530 u32 func_encode = BP_FUNC(bp) |
531 ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
532 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
533
534 /* Not supported in BC mode */
535 if (CHIP_INT_MODE_IS_BC(bp))
536 return;
537
538 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
539 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
540 IGU_REGULAR_CLEANUP_SET |
541 IGU_REGULAR_BCLEANUP;
542
543 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
544 func_encode << IGU_CTRL_REG_FID_SHIFT |
545 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
546
547 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
548 data, igu_addr_data);
549 REG_WR(bp, igu_addr_data, data);
550 mmiowb();
551 barrier();
552 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
553 ctl, igu_addr_ctl);
554 REG_WR(bp, igu_addr_ctl, ctl);
555 mmiowb();
556 barrier();
557
558 /* wait for clean up to finish */
559 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
560 msleep(20);
561
562
563 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
564 DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
565 "idu_sb_id %d offset %d bit %d (cnt %d)\n",
566 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
567 }
568}
569
570static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
571 u8 storm, u16 index, u8 op, u8 update)
362{ 572{
363 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 573 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
364 COMMAND_REG_INT_ACK); 574 COMMAND_REG_INT_ACK);
@@ -379,7 +589,37 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
379 mmiowb(); 589 mmiowb();
380 barrier(); 590 barrier();
381} 591}
382static inline u16 bnx2x_ack_int(struct bnx2x *bp) 592
593static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
594 u16 index, u8 op, u8 update)
595{
596 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
597
598 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
599 igu_addr);
600}
601
602static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
603 u16 index, u8 op, u8 update)
604{
605 if (bp->common.int_block == INT_BLOCK_HC)
606 bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
607 else {
608 u8 segment;
609
610 if (CHIP_INT_MODE_IS_BC(bp))
611 segment = storm;
612 else if (igu_sb_id != bp->igu_dsb_id)
613 segment = IGU_SEG_ACCESS_DEF;
614 else if (storm == ATTENTION_ID)
615 segment = IGU_SEG_ACCESS_ATTN;
616 else
617 segment = IGU_SEG_ACCESS_DEF;
618 bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
619 }
620}
621
622static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
383{ 623{
384 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 + 624 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
385 COMMAND_REG_SIMD_MASK); 625 COMMAND_REG_SIMD_MASK);
@@ -388,18 +628,36 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
388 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n", 628 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
389 result, hc_addr); 629 result, hc_addr);
390 630
631 barrier();
391 return result; 632 return result;
392} 633}
393 634
394/* 635static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
395 * fast path service functions 636{
396 */ 637 u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
638 u32 result = REG_RD(bp, igu_addr);
639
640 DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
641 result, igu_addr);
642
643 barrier();
644 return result;
645}
646
647static inline u16 bnx2x_ack_int(struct bnx2x *bp)
648{
649 barrier();
650 if (bp->common.int_block == INT_BLOCK_HC)
651 return bnx2x_hc_ack_int(bp);
652 else
653 return bnx2x_igu_ack_int(bp);
654}
397 655
398static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp) 656static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
399{ 657{
400 /* Tell compiler that consumer and producer can change */ 658 /* Tell compiler that consumer and producer can change */
401 barrier(); 659 barrier();
402 return (fp->tx_pkt_prod != fp->tx_pkt_cons); 660 return fp->tx_pkt_prod != fp->tx_pkt_cons;
403} 661}
404 662
405static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) 663static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
@@ -434,6 +692,29 @@ static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
434 return hw_cons != fp->tx_pkt_cons; 692 return hw_cons != fp->tx_pkt_cons;
435} 693}
436 694
695static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
696{
697 u16 rx_cons_sb;
698
699 /* Tell compiler that status block fields can change */
700 barrier();
701 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
702 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
703 rx_cons_sb++;
704 return (fp->rx_comp_cons != rx_cons_sb);
705}
706
707/**
708 * disables tx from stack point of view
709 *
710 * @param bp
711 */
712static inline void bnx2x_tx_disable(struct bnx2x *bp)
713{
714 netif_tx_disable(bp->dev);
715 netif_carrier_off(bp->dev);
716}
717
437static inline void bnx2x_free_rx_sge(struct bnx2x *bp, 718static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
438 struct bnx2x_fastpath *fp, u16 index) 719 struct bnx2x_fastpath *fp, u16 index)
439{ 720{
@@ -446,7 +727,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
446 return; 727 return;
447 728
448 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), 729 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
449 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); 730 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
450 __free_pages(page, PAGES_PER_SGE_SHIFT); 731 __free_pages(page, PAGES_PER_SGE_SHIFT);
451 732
452 sw_buf->page = NULL; 733 sw_buf->page = NULL;
@@ -454,13 +735,67 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
454 sge->addr_lo = 0; 735 sge->addr_lo = 0;
455} 736}
456 737
457static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, 738static inline void bnx2x_add_all_napi(struct bnx2x *bp)
458 struct bnx2x_fastpath *fp, int last)
459{ 739{
460 int i; 740 int i;
461 741
462 for (i = 0; i < last; i++) 742 /* Add NAPI objects */
463 bnx2x_free_rx_sge(bp, fp, i); 743 for_each_queue(bp, i)
744 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
745 bnx2x_poll, BNX2X_NAPI_WEIGHT);
746}
747
748static inline void bnx2x_del_all_napi(struct bnx2x *bp)
749{
750 int i;
751
752 for_each_queue(bp, i)
753 netif_napi_del(&bnx2x_fp(bp, i, napi));
754}
755
756static inline void bnx2x_disable_msi(struct bnx2x *bp)
757{
758 if (bp->flags & USING_MSIX_FLAG) {
759 pci_disable_msix(bp->pdev);
760 bp->flags &= ~USING_MSIX_FLAG;
761 } else if (bp->flags & USING_MSI_FLAG) {
762 pci_disable_msi(bp->pdev);
763 bp->flags &= ~USING_MSI_FLAG;
764 }
765}
766
767static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
768{
769 return num_queues ?
770 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
771 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
772}
773
774static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
775{
776 int i, j;
777
778 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
779 int idx = RX_SGE_CNT * i - 1;
780
781 for (j = 0; j < 2; j++) {
782 SGE_MASK_CLEAR_BIT(fp, idx);
783 idx--;
784 }
785 }
786}
787
788static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
789{
790 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
791 memset(fp->sge_mask, 0xff,
792 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
793
794 /* Clear the two last indices in the page to 1:
795 these are the indices that correspond to the "next" element,
796 hence will never be indicated and should be removed from
797 the calculations. */
798 bnx2x_clear_sge_mask_next_elems(fp);
464} 799}
465 800
466static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, 801static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -489,6 +824,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
489 824
490 return 0; 825 return 0;
491} 826}
827
492static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, 828static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
493 struct bnx2x_fastpath *fp, u16 index) 829 struct bnx2x_fastpath *fp, u16 index)
494{ 830{
@@ -523,7 +859,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
523 * so there is no need to check for dma_mapping_error(). 859 * so there is no need to check for dma_mapping_error().
524 */ 860 */
525static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp, 861static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
526 struct sk_buff *skb, u16 cons, u16 prod) 862 u16 cons, u16 prod)
527{ 863{
528 struct bnx2x *bp = fp->bp; 864 struct bnx2x *bp = fp->bp;
529 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; 865 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -541,32 +877,15 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
541 *prod_bd = *cons_bd; 877 *prod_bd = *cons_bd;
542} 878}
543 879
544static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) 880static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
881 struct bnx2x_fastpath *fp, int last)
545{ 882{
546 int i, j; 883 int i;
547
548 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
549 int idx = RX_SGE_CNT * i - 1;
550 884
551 for (j = 0; j < 2; j++) { 885 for (i = 0; i < last; i++)
552 SGE_MASK_CLEAR_BIT(fp, idx); 886 bnx2x_free_rx_sge(bp, fp, i);
553 idx--;
554 }
555 }
556} 887}
557 888
558static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
559{
560 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
561 memset(fp->sge_mask, 0xff,
562 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
563
564 /* Clear the two last indices in the page to 1:
565 these are the indices that correspond to the "next" element,
566 hence will never be indicated and should be removed from
567 the calculations. */
568 bnx2x_clear_sge_mask_next_elems(fp);
569}
570static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, 889static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
571 struct bnx2x_fastpath *fp, int last) 890 struct bnx2x_fastpath *fp, int last)
572{ 891{
@@ -592,7 +911,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
592} 911}
593 912
594 913
595static inline void bnx2x_init_tx_ring(struct bnx2x *bp) 914static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
596{ 915{
597 int i, j; 916 int i, j;
598 917
@@ -611,7 +930,7 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
611 BCM_PAGE_SIZE*(i % NUM_TX_RINGS))); 930 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
612 } 931 }
613 932
614 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE; 933 SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
615 fp->tx_db.data.zero_fill1 = 0; 934 fp->tx_db.data.zero_fill1 = 0;
616 fp->tx_db.data.prod = 0; 935 fp->tx_db.data.prod = 0;
617 936
@@ -619,44 +938,98 @@ static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
619 fp->tx_pkt_cons = 0; 938 fp->tx_pkt_cons = 0;
620 fp->tx_bd_prod = 0; 939 fp->tx_bd_prod = 0;
621 fp->tx_bd_cons = 0; 940 fp->tx_bd_cons = 0;
622 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
623 fp->tx_pkt = 0; 941 fp->tx_pkt = 0;
624 } 942 }
625} 943}
626static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp) 944
945static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
627{ 946{
628 u16 rx_cons_sb; 947 int i;
629 948
630 /* Tell compiler that status block fields can change */ 949 for (i = 1; i <= NUM_RX_RINGS; i++) {
631 barrier(); 950 struct eth_rx_bd *rx_bd;
632 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 951
633 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) 952 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
634 rx_cons_sb++; 953 rx_bd->addr_hi =
635 return (fp->rx_comp_cons != rx_cons_sb); 954 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
955 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
956 rx_bd->addr_lo =
957 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
958 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
959 }
960}
961
962static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
963{
964 int i;
965
966 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
967 struct eth_rx_sge *sge;
968
969 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
970 sge->addr_hi =
971 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
972 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
973
974 sge->addr_lo =
975 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
976 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
977 }
978}
979
980static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
981{
982 int i;
983 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
984 struct eth_rx_cqe_next_page *nextpg;
985
986 nextpg = (struct eth_rx_cqe_next_page *)
987 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
988 nextpg->addr_hi =
989 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
990 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
991 nextpg->addr_lo =
992 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
993 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
994 }
995}
996
997
998
999static inline void __storm_memset_struct(struct bnx2x *bp,
1000 u32 addr, size_t size, u32 *data)
1001{
1002 int i;
1003 for (i = 0; i < size/4; i++)
1004 REG_WR(bp, addr + (i * 4), data[i]);
1005}
1006
1007static inline void storm_memset_mac_filters(struct bnx2x *bp,
1008 struct tstorm_eth_mac_filter_config *mac_filters,
1009 u16 abs_fid)
1010{
1011 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1012
1013 u32 addr = BAR_TSTRORM_INTMEM +
1014 TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
1015
1016 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
1017}
1018
1019static inline void storm_memset_cmng(struct bnx2x *bp,
1020 struct cmng_struct_per_port *cmng,
1021 u8 port)
1022{
1023 size_t size = sizeof(struct cmng_struct_per_port);
1024
1025 u32 addr = BAR_XSTRORM_INTMEM +
1026 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1027
1028 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
636} 1029}
637 1030
638/* HW Lock for shared dual port PHYs */ 1031/* HW Lock for shared dual port PHYs */
639void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1032void bnx2x_acquire_phy_lock(struct bnx2x *bp);
640void bnx2x_release_phy_lock(struct bnx2x *bp); 1033void bnx2x_release_phy_lock(struct bnx2x *bp);
641 1034
642void bnx2x_link_report(struct bnx2x *bp);
643int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
644int bnx2x_tx_int(struct bnx2x_fastpath *fp);
645void bnx2x_init_rx_rings(struct bnx2x *bp);
646netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
647
648int bnx2x_change_mac_addr(struct net_device *dev, void *p);
649void bnx2x_tx_timeout(struct net_device *dev);
650void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
651void bnx2x_netif_start(struct bnx2x *bp);
652void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
653void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
654int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
655int bnx2x_resume(struct pci_dev *pdev);
656void bnx2x_free_skbs(struct bnx2x *bp);
657int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
658int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
659int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
660int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
661
662#endif /* BNX2X_CMN_H */ 1035#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index 3bb9a91bb3f7..dc18c25ca9e5 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -31,14 +31,24 @@ struct dump_sign {
31 31
32#define RI_E1 0x1 32#define RI_E1 0x1
33#define RI_E1H 0x2 33#define RI_E1H 0x2
34#define RI_E2 0x4
34#define RI_ONLINE 0x100 35#define RI_ONLINE 0x100
35 36#define RI_PATH0_DUMP 0x200
37#define RI_PATH1_DUMP 0x400
36#define RI_E1_OFFLINE (RI_E1) 38#define RI_E1_OFFLINE (RI_E1)
37#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 39#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
38#define RI_E1H_OFFLINE (RI_E1H) 40#define RI_E1H_OFFLINE (RI_E1H)
39#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 41#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
40#define RI_ALL_OFFLINE (RI_E1 | RI_E1H) 42#define RI_E2_OFFLINE (RI_E2)
41#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 43#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
44#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
45#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
46#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
47#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
48#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
49#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
50#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
51#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
42 52
43#define MAX_TIMER_PENDING 200 53#define MAX_TIMER_PENDING 200
44#define TIMER_SCAN_DONT_CARE 0xFF 54#define TIMER_SCAN_DONT_CARE 0xFF
@@ -513,6 +523,12 @@ static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = {
513 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } 523 { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE }
514}; 524};
515 525
526#define WREGS_COUNT_E2 1
527static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 };
528
529static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
530 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
531};
516 532
517static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 533static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
518 534
@@ -531,4 +547,17 @@ static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
531 { 0x1640d0, 0x1640d4 }; 547 { 0x1640d0, 0x1640d4 };
532 548
533 549
550#define PAGE_MODE_VALUES_E2 2
551
552#define PAGE_READ_REGS_E2 1
553
554#define PAGE_WRITE_REGS_E2 1
555
556static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 };
557
558static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 };
559
560static const struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = {
561 { 0x58000, 4608, RI_E2_ONLINE } };
562
534#endif /* BNX2X_DUMP_H */ 563#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d9748e97fad3..daefef618ef7 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -25,7 +25,6 @@
25#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h" 26#include "bnx2x_dump.h"
27 27
28
29static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 28static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
30{ 29{
31 struct bnx2x *bp = netdev_priv(dev); 30 struct bnx2x *bp = netdev_priv(dev);
@@ -41,19 +40,19 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
41 (bp->link_vars.link_up)) { 40 (bp->link_vars.link_up)) {
42 cmd->speed = bp->link_vars.line_speed; 41 cmd->speed = bp->link_vars.line_speed;
43 cmd->duplex = bp->link_vars.duplex; 42 cmd->duplex = bp->link_vars.duplex;
44 if (IS_E1HMF(bp)) {
45 u16 vn_max_rate;
46
47 vn_max_rate =
48 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
49 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
50 if (vn_max_rate < cmd->speed)
51 cmd->speed = vn_max_rate;
52 }
53 } else { 43 } else {
44
54 cmd->speed = bp->link_params.req_line_speed[cfg_idx]; 45 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
55 cmd->duplex = bp->link_params.req_duplex[cfg_idx]; 46 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
56 } 47 }
48 if (IS_MF(bp)) {
49 u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
50 FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
51 100;
52
53 if (vn_max_rate < cmd->speed)
54 cmd->speed = vn_max_rate;
55 }
57 56
58 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 57 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
59 cmd->port = PORT_TP; 58 cmd->port = PORT_TP;
@@ -89,7 +88,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
89 struct bnx2x *bp = netdev_priv(dev); 88 struct bnx2x *bp = netdev_priv(dev);
90 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; 89 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
91 90
92 if (IS_E1HMF(bp)) 91 if (IS_MF(bp))
93 return 0; 92 return 0;
94 93
95 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 94 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
@@ -298,6 +297,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
298 297
299#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE) 298#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
300#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE) 299#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
300#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
301 301
302static int bnx2x_get_regs_len(struct net_device *dev) 302static int bnx2x_get_regs_len(struct net_device *dev)
303{ 303{
@@ -315,7 +315,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
315 regdump_len += wreg_addrs_e1[i].size * 315 regdump_len += wreg_addrs_e1[i].size *
316 (1 + wreg_addrs_e1[i].read_regs_count); 316 (1 + wreg_addrs_e1[i].read_regs_count);
317 317
318 } else { /* E1H */ 318 } else if (CHIP_IS_E1H(bp)) {
319 for (i = 0; i < REGS_COUNT; i++) 319 for (i = 0; i < REGS_COUNT; i++)
320 if (IS_E1H_ONLINE(reg_addrs[i].info)) 320 if (IS_E1H_ONLINE(reg_addrs[i].info))
321 regdump_len += reg_addrs[i].size; 321 regdump_len += reg_addrs[i].size;
@@ -324,6 +324,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
324 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info)) 324 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
325 regdump_len += wreg_addrs_e1h[i].size * 325 regdump_len += wreg_addrs_e1h[i].size *
326 (1 + wreg_addrs_e1h[i].read_regs_count); 326 (1 + wreg_addrs_e1h[i].read_regs_count);
327 } else if (CHIP_IS_E2(bp)) {
328 for (i = 0; i < REGS_COUNT; i++)
329 if (IS_E2_ONLINE(reg_addrs[i].info))
330 regdump_len += reg_addrs[i].size;
331
332 for (i = 0; i < WREGS_COUNT_E2; i++)
333 if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
334 regdump_len += wreg_addrs_e2[i].size *
335 (1 + wreg_addrs_e2[i].read_regs_count);
327 } 336 }
328 regdump_len *= 4; 337 regdump_len *= 4;
329 regdump_len += sizeof(struct dump_hdr); 338 regdump_len += sizeof(struct dump_hdr);
@@ -331,6 +340,23 @@ static int bnx2x_get_regs_len(struct net_device *dev)
331 return regdump_len; 340 return regdump_len;
332} 341}
333 342
343static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
344{
345 u32 i, j, k, n;
346
347 for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
348 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
349 REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
350 for (k = 0; k < PAGE_READ_REGS_E2; k++)
351 if (IS_E2_ONLINE(page_read_regs_e2[k].info))
352 for (n = 0; n <
353 page_read_regs_e2[k].size; n++)
354 *p++ = REG_RD(bp,
355 page_read_regs_e2[k].addr + n*4);
356 }
357 }
358}
359
334static void bnx2x_get_regs(struct net_device *dev, 360static void bnx2x_get_regs(struct net_device *dev,
335 struct ethtool_regs *regs, void *_p) 361 struct ethtool_regs *regs, void *_p)
336{ 362{
@@ -350,7 +376,14 @@ static void bnx2x_get_regs(struct net_device *dev,
350 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR); 376 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
351 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR); 377 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
352 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR); 378 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
353 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE; 379
380 if (CHIP_IS_E1(bp))
381 dump_hdr.info = RI_E1_ONLINE;
382 else if (CHIP_IS_E1H(bp))
383 dump_hdr.info = RI_E1H_ONLINE;
384 else if (CHIP_IS_E2(bp))
385 dump_hdr.info = RI_E2_ONLINE |
386 (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
354 387
355 memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); 388 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
356 p += dump_hdr.hdr_size + 1; 389 p += dump_hdr.hdr_size + 1;
@@ -362,16 +395,25 @@ static void bnx2x_get_regs(struct net_device *dev,
362 *p++ = REG_RD(bp, 395 *p++ = REG_RD(bp,
363 reg_addrs[i].addr + j*4); 396 reg_addrs[i].addr + j*4);
364 397
365 } else { /* E1H */ 398 } else if (CHIP_IS_E1H(bp)) {
366 for (i = 0; i < REGS_COUNT; i++) 399 for (i = 0; i < REGS_COUNT; i++)
367 if (IS_E1H_ONLINE(reg_addrs[i].info)) 400 if (IS_E1H_ONLINE(reg_addrs[i].info))
368 for (j = 0; j < reg_addrs[i].size; j++) 401 for (j = 0; j < reg_addrs[i].size; j++)
369 *p++ = REG_RD(bp, 402 *p++ = REG_RD(bp,
370 reg_addrs[i].addr + j*4); 403 reg_addrs[i].addr + j*4);
404
405 } else if (CHIP_IS_E2(bp)) {
406 for (i = 0; i < REGS_COUNT; i++)
407 if (IS_E2_ONLINE(reg_addrs[i].info))
408 for (j = 0; j < reg_addrs[i].size; j++)
409 *p++ = REG_RD(bp,
410 reg_addrs[i].addr + j*4);
411
412 bnx2x_read_pages_regs_e2(bp, p);
371 } 413 }
372} 414}
373 415
374#define PHY_FW_VER_LEN 10 416#define PHY_FW_VER_LEN 20
375 417
376static void bnx2x_get_drvinfo(struct net_device *dev, 418static void bnx2x_get_drvinfo(struct net_device *dev,
377 struct ethtool_drvinfo *info) 419 struct ethtool_drvinfo *info)
@@ -474,7 +516,7 @@ static u32 bnx2x_get_link(struct net_device *dev)
474{ 516{
475 struct bnx2x *bp = netdev_priv(dev); 517 struct bnx2x *bp = netdev_priv(dev);
476 518
477 if (bp->flags & MF_FUNC_DIS) 519 if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
478 return 0; 520 return 0;
479 521
480 return bp->link_vars.link_up; 522 return bp->link_vars.link_up;
@@ -920,6 +962,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
920 962
921 return rc; 963 return rc;
922} 964}
965
923static int bnx2x_get_coalesce(struct net_device *dev, 966static int bnx2x_get_coalesce(struct net_device *dev,
924 struct ethtool_coalesce *coal) 967 struct ethtool_coalesce *coal)
925{ 968{
@@ -1027,7 +1070,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
1027{ 1070{
1028 struct bnx2x *bp = netdev_priv(dev); 1071 struct bnx2x *bp = netdev_priv(dev);
1029 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp); 1072 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
1030 if (IS_E1HMF(bp)) 1073 if (IS_MF(bp))
1031 return 0; 1074 return 0;
1032 1075
1033 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" 1076 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
@@ -1074,35 +1117,34 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
1074 int changed = 0; 1117 int changed = 0;
1075 int rc = 0; 1118 int rc = 0;
1076 1119
1077 if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
1078 return -EINVAL;
1079
1080 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 1120 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1081 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 1121 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
1082 return -EAGAIN; 1122 return -EAGAIN;
1083 } 1123 }
1084 1124
1125 if (!(data & ETH_FLAG_RXVLAN))
1126 return -EOPNOTSUPP;
1127
1128 if ((data & ETH_FLAG_LRO) && bp->rx_csum && bp->disable_tpa)
1129 return -EINVAL;
1130
1131 rc = ethtool_op_set_flags(dev, data, ETH_FLAG_LRO | ETH_FLAG_RXVLAN |
1132 ETH_FLAG_TXVLAN | ETH_FLAG_RXHASH);
1133 if (rc)
1134 return rc;
1135
1085 /* TPA requires Rx CSUM offloading */ 1136 /* TPA requires Rx CSUM offloading */
1086 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 1137 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
1087 if (!bp->disable_tpa) { 1138 if (!(bp->flags & TPA_ENABLE_FLAG)) {
1088 if (!(dev->features & NETIF_F_LRO)) { 1139 bp->flags |= TPA_ENABLE_FLAG;
1089 dev->features |= NETIF_F_LRO; 1140 changed = 1;
1090 bp->flags |= TPA_ENABLE_FLAG; 1141 }
1091 changed = 1; 1142 } else if (bp->flags & TPA_ENABLE_FLAG) {
1092 }
1093 } else
1094 rc = -EINVAL;
1095 } else if (dev->features & NETIF_F_LRO) {
1096 dev->features &= ~NETIF_F_LRO; 1143 dev->features &= ~NETIF_F_LRO;
1097 bp->flags &= ~TPA_ENABLE_FLAG; 1144 bp->flags &= ~TPA_ENABLE_FLAG;
1098 changed = 1; 1145 changed = 1;
1099 } 1146 }
1100 1147
1101 if (data & ETH_FLAG_RXHASH)
1102 dev->features |= NETIF_F_RXHASH;
1103 else
1104 dev->features &= ~NETIF_F_RXHASH;
1105
1106 if (changed && netif_running(dev)) { 1148 if (changed && netif_running(dev)) {
1107 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 1149 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1108 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 1150 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -1235,6 +1277,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
1235 1277
1236 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 1278 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
1237 u32 offset, mask, save_val, val; 1279 u32 offset, mask, save_val, val;
1280 if (CHIP_IS_E2(bp) &&
1281 reg_tbl[i].offset0 == HC_REG_AGG_INT_0)
1282 continue;
1238 1283
1239 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 1284 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
1240 mask = reg_tbl[i].mask; 1285 mask = reg_tbl[i].mask;
@@ -1242,6 +1287,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
1242 save_val = REG_RD(bp, offset); 1287 save_val = REG_RD(bp, offset);
1243 1288
1244 REG_WR(bp, offset, (wr_val & mask)); 1289 REG_WR(bp, offset, (wr_val & mask));
1290
1245 val = REG_RD(bp, offset); 1291 val = REG_RD(bp, offset);
1246 1292
1247 /* Restore the original register's value */ 1293 /* Restore the original register's value */
@@ -1286,20 +1332,33 @@ static int bnx2x_test_memory(struct bnx2x *bp)
1286 u32 offset; 1332 u32 offset;
1287 u32 e1_mask; 1333 u32 e1_mask;
1288 u32 e1h_mask; 1334 u32 e1h_mask;
1335 u32 e2_mask;
1289 } prty_tbl[] = { 1336 } prty_tbl[] = {
1290 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 }, 1337 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 },
1291 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 }, 1338 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 },
1292 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 }, 1339 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 },
1293 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 }, 1340 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 },
1294 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 }, 1341 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 },
1295 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 }, 1342 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 },
1296 1343
1297 { NULL, 0xffffffff, 0, 0 } 1344 { NULL, 0xffffffff, 0, 0, 0 }
1298 }; 1345 };
1299 1346
1300 if (!netif_running(bp->dev)) 1347 if (!netif_running(bp->dev))
1301 return rc; 1348 return rc;
1302 1349
1350 /* pre-Check the parity status */
1351 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1352 val = REG_RD(bp, prty_tbl[i].offset);
1353 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1354 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
1355 (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
1356 DP(NETIF_MSG_HW,
1357 "%s is 0x%x\n", prty_tbl[i].name, val);
1358 goto test_mem_exit;
1359 }
1360 }
1361
1303 /* Go through all the memories */ 1362 /* Go through all the memories */
1304 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) 1363 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
1305 for (j = 0; j < mem_tbl[i].size; j++) 1364 for (j = 0; j < mem_tbl[i].size; j++)
@@ -1309,7 +1368,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
1309 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 1368 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
1310 val = REG_RD(bp, prty_tbl[i].offset); 1369 val = REG_RD(bp, prty_tbl[i].offset);
1311 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || 1370 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
1312 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) { 1371 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) ||
1372 (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) {
1313 DP(NETIF_MSG_HW, 1373 DP(NETIF_MSG_HW,
1314 "%s is 0x%x\n", prty_tbl[i].name, val); 1374 "%s is 0x%x\n", prty_tbl[i].name, val);
1315 goto test_mem_exit; 1375 goto test_mem_exit;
@@ -1324,7 +1384,7 @@ test_mem_exit:
1324 1384
1325static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) 1385static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
1326{ 1386{
1327 int cnt = 1000; 1387 int cnt = 1400;
1328 1388
1329 if (link_up) 1389 if (link_up)
1330 while (bnx2x_link_test(bp, is_serdes) && cnt--) 1390 while (bnx2x_link_test(bp, is_serdes) && cnt--)
@@ -1343,7 +1403,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1343 u16 pkt_prod, bd_prod; 1403 u16 pkt_prod, bd_prod;
1344 struct sw_tx_bd *tx_buf; 1404 struct sw_tx_bd *tx_buf;
1345 struct eth_tx_start_bd *tx_start_bd; 1405 struct eth_tx_start_bd *tx_start_bd;
1346 struct eth_tx_parse_bd *pbd = NULL; 1406 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1407 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1347 dma_addr_t mapping; 1408 dma_addr_t mapping;
1348 union eth_rx_cqe *cqe; 1409 union eth_rx_cqe *cqe;
1349 u8 cqe_fp_flags; 1410 u8 cqe_fp_flags;
@@ -1399,16 +1460,23 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1399 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 1460 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1400 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 1461 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
1401 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 1462 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
1402 tx_start_bd->vlan = cpu_to_le16(pkt_prod); 1463 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
1403 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 1464 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
1404 tx_start_bd->general_data = ((UNICAST_ADDRESS << 1465 SET_FLAG(tx_start_bd->general_data,
1405 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1); 1466 ETH_TX_START_BD_ETH_ADDR_TYPE,
1467 UNICAST_ADDRESS);
1468 SET_FLAG(tx_start_bd->general_data,
1469 ETH_TX_START_BD_HDR_NBDS,
1470 1);
1406 1471
1407 /* turn on parsing and get a BD */ 1472 /* turn on parsing and get a BD */
1408 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); 1473 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1409 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
1410 1474
1411 memset(pbd, 0, sizeof(struct eth_tx_parse_bd)); 1475 pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x;
1476 pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2;
1477
1478 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
1479 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
1412 1480
1413 wmb(); 1481 wmb();
1414 1482
@@ -1427,6 +1495,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1427 if (tx_idx != tx_start_idx + num_pkts) 1495 if (tx_idx != tx_start_idx + num_pkts)
1428 goto test_loopback_exit; 1496 goto test_loopback_exit;
1429 1497
1498 /* Unlike HC IGU won't generate an interrupt for status block
1499 * updates that have been performed while interrupts were
1500 * disabled.
1501 */
1502 if (bp->common.int_block == INT_BLOCK_IGU)
1503 bnx2x_tx_int(fp_tx);
1504
1430 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1505 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1431 if (rx_idx != rx_start_idx + num_pkts) 1506 if (rx_idx != rx_start_idx + num_pkts)
1432 goto test_loopback_exit; 1507 goto test_loopback_exit;
@@ -1569,8 +1644,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
1569 1644
1570 config->hdr.length = 0; 1645 config->hdr.length = 0;
1571 if (CHIP_IS_E1(bp)) 1646 if (CHIP_IS_E1(bp))
1572 /* use last unicast entries */ 1647 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
1573 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
1574 else 1648 else
1575 config->hdr.offset = BP_FUNC(bp); 1649 config->hdr.offset = BP_FUNC(bp);
1576 config->hdr.client_id = bp->fp->cl_id; 1650 config->hdr.client_id = bp->fp->cl_id;
@@ -1578,9 +1652,9 @@ static int bnx2x_test_intr(struct bnx2x *bp)
1578 1652
1579 bp->set_mac_pending++; 1653 bp->set_mac_pending++;
1580 smp_wmb(); 1654 smp_wmb();
1581 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 1655 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
1582 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 1656 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
1583 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 1657 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
1584 if (rc == 0) { 1658 if (rc == 0) {
1585 for (i = 0; i < 10; i++) { 1659 for (i = 0; i < 10; i++) {
1586 if (!bp->set_mac_pending) 1660 if (!bp->set_mac_pending)
@@ -1612,7 +1686,7 @@ static void bnx2x_self_test(struct net_device *dev,
1612 return; 1686 return;
1613 1687
1614 /* offline tests are not supported in MF mode */ 1688 /* offline tests are not supported in MF mode */
1615 if (IS_E1HMF(bp)) 1689 if (IS_MF(bp))
1616 etest->flags &= ~ETH_TEST_FL_OFFLINE; 1690 etest->flags &= ~ETH_TEST_FL_OFFLINE;
1617 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; 1691 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
1618 1692
@@ -1641,6 +1715,7 @@ static void bnx2x_self_test(struct net_device *dev,
1641 buf[1] = 1; 1715 buf[1] = 1;
1642 etest->flags |= ETH_TEST_FL_FAILED; 1716 etest->flags |= ETH_TEST_FL_FAILED;
1643 } 1717 }
1718
1644 buf[2] = bnx2x_test_loopback(bp, link_up); 1719 buf[2] = bnx2x_test_loopback(bp, link_up);
1645 if (buf[2] != 0) 1720 if (buf[2] != 0)
1646 etest->flags |= ETH_TEST_FL_FAILED; 1721 etest->flags |= ETH_TEST_FL_FAILED;
@@ -1804,8 +1879,8 @@ static const struct {
1804#define IS_PORT_STAT(i) \ 1879#define IS_PORT_STAT(i) \
1805 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) 1880 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
1806#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) 1881#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
1807#define IS_E1HMF_MODE_STAT(bp) \ 1882#define IS_MF_MODE_STAT(bp) \
1808 (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) 1883 (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
1809 1884
1810static int bnx2x_get_sset_count(struct net_device *dev, int stringset) 1885static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1811{ 1886{
@@ -1816,10 +1891,10 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1816 case ETH_SS_STATS: 1891 case ETH_SS_STATS:
1817 if (is_multi(bp)) { 1892 if (is_multi(bp)) {
1818 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 1893 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
1819 if (!IS_E1HMF_MODE_STAT(bp)) 1894 if (!IS_MF_MODE_STAT(bp))
1820 num_stats += BNX2X_NUM_STATS; 1895 num_stats += BNX2X_NUM_STATS;
1821 } else { 1896 } else {
1822 if (IS_E1HMF_MODE_STAT(bp)) { 1897 if (IS_MF_MODE_STAT(bp)) {
1823 num_stats = 0; 1898 num_stats = 0;
1824 for (i = 0; i < BNX2X_NUM_STATS; i++) 1899 for (i = 0; i < BNX2X_NUM_STATS; i++)
1825 if (IS_FUNC_STAT(i)) 1900 if (IS_FUNC_STAT(i))
@@ -1852,14 +1927,14 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1852 bnx2x_q_stats_arr[j].string, i); 1927 bnx2x_q_stats_arr[j].string, i);
1853 k += BNX2X_NUM_Q_STATS; 1928 k += BNX2X_NUM_Q_STATS;
1854 } 1929 }
1855 if (IS_E1HMF_MODE_STAT(bp)) 1930 if (IS_MF_MODE_STAT(bp))
1856 break; 1931 break;
1857 for (j = 0; j < BNX2X_NUM_STATS; j++) 1932 for (j = 0; j < BNX2X_NUM_STATS; j++)
1858 strcpy(buf + (k + j)*ETH_GSTRING_LEN, 1933 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
1859 bnx2x_stats_arr[j].string); 1934 bnx2x_stats_arr[j].string);
1860 } else { 1935 } else {
1861 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 1936 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1862 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) 1937 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
1863 continue; 1938 continue;
1864 strcpy(buf + j*ETH_GSTRING_LEN, 1939 strcpy(buf + j*ETH_GSTRING_LEN,
1865 bnx2x_stats_arr[i].string); 1940 bnx2x_stats_arr[i].string);
@@ -1903,7 +1978,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1903 } 1978 }
1904 k += BNX2X_NUM_Q_STATS; 1979 k += BNX2X_NUM_Q_STATS;
1905 } 1980 }
1906 if (IS_E1HMF_MODE_STAT(bp)) 1981 if (IS_MF_MODE_STAT(bp))
1907 return; 1982 return;
1908 hw_stats = (u32 *)&bp->eth_stats; 1983 hw_stats = (u32 *)&bp->eth_stats;
1909 for (j = 0; j < BNX2X_NUM_STATS; j++) { 1984 for (j = 0; j < BNX2X_NUM_STATS; j++) {
@@ -1924,7 +1999,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1924 } else { 1999 } else {
1925 hw_stats = (u32 *)&bp->eth_stats; 2000 hw_stats = (u32 *)&bp->eth_stats;
1926 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 2001 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
1927 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i)) 2002 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
1928 continue; 2003 continue;
1929 if (bnx2x_stats_arr[i].size == 0) { 2004 if (bnx2x_stats_arr[i].size == 0) {
1930 /* skip this counter */ 2005 /* skip this counter */
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index 08d71bf438d6..f4e5b1ce8149 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -7,369 +7,272 @@
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9 9
10 10#ifndef BNX2X_FW_DEFS_H
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ 11#define BNX2X_FW_DEFS_H
12 (IS_E1H_OFFSET ? 0x7000 : 0x1000) 12
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \ 13#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base)
14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 14#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
15#define CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(function, index) \ 15 (IRO[141].base + ((assertListEntry) * IRO[141].m1))
16 (IS_E1H_OFFSET ? (0x8622 + ((function>>1) * 0x40) + \ 16#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
17 ((function&1) * 0x100) + (index * 0x4)) : (0x3562 + (function * \ 17 (IRO[144].base + ((pfId) * IRO[144].m1))
18 0x40) + (index * 0x4))) 18#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
19#define CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(function, index) \ 19 (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \
20 (IS_E1H_OFFSET ? (0x8822 + ((function>>1) * 0x80) + \ 20 IRO[149].m2))
21 ((function&1) * 0x200) + (index * 0x4)) : (0x35e2 + (function * \ 21#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
22 0x80) + (index * 0x4))) 22 (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \
23#define CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(function) \ 23 IRO[150].m2))
24 (IS_E1H_OFFSET ? (0x8600 + ((function>>1) * 0x40) + \ 24#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
25 ((function&1) * 0x100)) : (0x3540 + (function * 0x40))) 25 (IRO[156].base + ((funcId) * IRO[156].m1))
26#define CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(function) \ 26#define CSTORM_FUNC_EN_OFFSET(funcId) \
27 (IS_E1H_OFFSET ? (0x8800 + ((function>>1) * 0x80) + \ 27 (IRO[146].base + ((funcId) * IRO[146].m1))
28 ((function&1) * 0x200)) : (0x35c0 + (function * 0x80))) 28#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base)
29#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(function) \ 29#define CSTORM_IGU_MODE_OFFSET (IRO[154].base)
30 (IS_E1H_OFFSET ? (0x8608 + ((function>>1) * 0x40) + \ 30#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
31 ((function&1) * 0x100)) : (0x3548 + (function * 0x40))) 31 (IRO[311].base + ((pfId) * IRO[311].m1))
32#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(function) \ 32#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
33 (IS_E1H_OFFSET ? (0x8808 + ((function>>1) * 0x80) + \ 33 (IRO[312].base + ((pfId) * IRO[312].m1))
34 ((function&1) * 0x200)) : (0x35c8 + (function * 0x80))) 34 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
35#define CSTORM_FUNCTION_MODE_OFFSET \ 35 (IRO[304].base + ((pfId) * IRO[304].m1) + ((iscsiEqId) * \
36 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff) 36 IRO[304].m2))
37#define CSTORM_HC_BTR_C_OFFSET(port) \ 37 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
38 (IS_E1H_OFFSET ? (0x8c04 + (port * 0xf0)) : (0x36c4 + (port * 0xc0))) 38 (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \
39#define CSTORM_HC_BTR_U_OFFSET(port) \ 39 IRO[306].m2))
40 (IS_E1H_OFFSET ? (0x8de4 + (port * 0xf0)) : (0x3844 + (port * 0xc0))) 40 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
41#define CSTORM_ISCSI_CQ_SIZE_OFFSET(function) \ 41 (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \
42 (IS_E1H_OFFSET ? (0x6680 + (function * 0x8)) : (0x25a0 + \ 42 IRO[305].m2))
43 (function * 0x8))) 43 #define \
44#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ 44 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
45 (IS_E1H_OFFSET ? (0x66c0 + (function * 0x8)) : (0x25b0 + \ 45 (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \
46 (function * 0x8))) 46 IRO[307].m2))
47#define CSTORM_ISCSI_EQ_CONS_OFFSET(function, eqIdx) \ 47 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
48 (IS_E1H_OFFSET ? (0x6040 + (function * 0xc0) + (eqIdx * 0x18)) : \ 48 (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \
49 (0x2410 + (function * 0xc0) + (eqIdx * 0x18))) 49 IRO[303].m2))
50#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(function, eqIdx) \ 50 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
51 (IS_E1H_OFFSET ? (0x6044 + (function * 0xc0) + (eqIdx * 0x18)) : \ 51 (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \
52 (0x2414 + (function * 0xc0) + (eqIdx * 0x18))) 52 IRO[309].m2))
53#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(function, eqIdx) \ 53 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
54 (IS_E1H_OFFSET ? (0x604c + (function * 0xc0) + (eqIdx * 0x18)) : \ 54 (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \
55 (0x241c + (function * 0xc0) + (eqIdx * 0x18))) 55 IRO[308].m2))
56#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(function, eqIdx) \ 56#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
57 (IS_E1H_OFFSET ? (0x6057 + (function * 0xc0) + (eqIdx * 0x18)) : \ 57 (IRO[310].base + ((pfId) * IRO[310].m1))
58 (0x2427 + (function * 0xc0) + (eqIdx * 0x18))) 58#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
59#define CSTORM_ISCSI_EQ_PROD_OFFSET(function, eqIdx) \ 59 (IRO[302].base + ((pfId) * IRO[302].m1))
60 (IS_E1H_OFFSET ? (0x6042 + (function * 0xc0) + (eqIdx * 0x18)) : \ 60#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
61 (0x2412 + (function * 0xc0) + (eqIdx * 0x18))) 61 (IRO[301].base + ((pfId) * IRO[301].m1))
62#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(function, eqIdx) \ 62#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
63 (IS_E1H_OFFSET ? (0x6056 + (function * 0xc0) + (eqIdx * 0x18)) : \ 63 (IRO[300].base + ((pfId) * IRO[300].m1))
64 (0x2426 + (function * 0xc0) + (eqIdx * 0x18))) 64#define CSTORM_PATH_ID_OFFSET (IRO[159].base)
65#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(function, eqIdx) \ 65#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
66 (IS_E1H_OFFSET ? (0x6054 + (function * 0xc0) + (eqIdx * 0x18)) : \ 66 (IRO[137].base + ((pfId) * IRO[137].m1))
67 (0x2424 + (function * 0xc0) + (eqIdx * 0x18))) 67#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
68#define CSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ 68 (IRO[136].base + ((pfId) * IRO[136].m1))
69 (IS_E1H_OFFSET ? (0x6640 + (function * 0x8)) : (0x2590 + \ 69#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size)
70 (function * 0x8))) 70#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
71#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 71 (IRO[138].base + ((pfId) * IRO[138].m1))
72 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x2404 + \ 72#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size)
73 (function * 0x8))) 73#define CSTORM_STATS_FLAGS_OFFSET(pfId) \
74#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 74 (IRO[143].base + ((pfId) * IRO[143].m1))
75 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x2402 + \ 75#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
76 (function * 0x8))) 76 (IRO[129].base + ((sbId) * IRO[129].m1))
77#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 77#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
78 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x2400 + \ 78 (IRO[128].base + ((sbId) * IRO[128].m1))
79 (function * 0x8))) 79#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size)
80#define CSTORM_SB_HC_DISABLE_C_OFFSET(port, cpu_id, index) \ 80#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
81 (IS_E1H_OFFSET ? (0x811a + (port * 0x280) + (cpu_id * 0x28) + \ 81 (IRO[132].base + ((sbId) * IRO[132].m1))
82 (index * 0x4)) : (0x305a + (port * 0x280) + (cpu_id * 0x28) + \ 82#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size)
83 (index * 0x4))) 83#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
84#define CSTORM_SB_HC_DISABLE_U_OFFSET(port, cpu_id, index) \ 84 (IRO[151].base + ((vfId) * IRO[151].m1))
85 (IS_E1H_OFFSET ? (0xb01a + (port * 0x800) + (cpu_id * 0x80) + \ 85#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
86 (index * 0x4)) : (0x401a + (port * 0x800) + (cpu_id * 0x80) + \ 86 (IRO[152].base + ((vfId) * IRO[152].m1))
87 (index * 0x4))) 87#define CSTORM_VF_TO_PF_OFFSET(funcId) \
88#define CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, cpu_id, index) \ 88 (IRO[147].base + ((funcId) * IRO[147].m1))
89 (IS_E1H_OFFSET ? (0x8118 + (port * 0x280) + (cpu_id * 0x28) + \ 89#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base)
90 (index * 0x4)) : (0x3058 + (port * 0x280) + (cpu_id * 0x28) + \ 90#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
91 (index * 0x4))) 91 (IRO[198].base + ((pfId) * IRO[198].m1))
92#define CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, cpu_id, index) \ 92#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base)
93 (IS_E1H_OFFSET ? (0xb018 + (port * 0x800) + (cpu_id * 0x80) + \ 93#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
94 (index * 0x4)) : (0x4018 + (port * 0x800) + (cpu_id * 0x80) + \ 94 (IRO[98].base + ((assertListEntry) * IRO[98].m1))
95 (index * 0x4))) 95 #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \
96#define CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, cpu_id) \ 96 (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \
97 (IS_E1H_OFFSET ? (0x8100 + (port * 0x280) + (cpu_id * 0x28)) : \ 97 IRO[197].m2))
98 (0x3040 + (port * 0x280) + (cpu_id * 0x28))) 98#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base)
99#define CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, cpu_id) \
100 (IS_E1H_OFFSET ? (0xb000 + (port * 0x800) + (cpu_id * 0x80)) : \
101 (0x4000 + (port * 0x800) + (cpu_id * 0x80)))
102#define CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, cpu_id) \
103 (IS_E1H_OFFSET ? (0x8108 + (port * 0x280) + (cpu_id * 0x28)) : \
104 (0x3048 + (port * 0x280) + (cpu_id * 0x28)))
105#define CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, cpu_id) \
106 (IS_E1H_OFFSET ? (0xb008 + (port * 0x800) + (cpu_id * 0x80)) : \
107 (0x4008 + (port * 0x800) + (cpu_id * 0x80)))
108#define CSTORM_SB_STATUS_BLOCK_C_SIZE 0x10
109#define CSTORM_SB_STATUS_BLOCK_U_SIZE 0x60
110#define CSTORM_STATS_FLAGS_OFFSET(function) \
111 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
112 (function * 0x8)))
113#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
114 (IS_E1H_OFFSET ? (0x3200 + (function * 0x20)) : 0xffffffff)
115#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
116 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
117#define TSTORM_ASSERT_LIST_OFFSET(idx) \
118 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
119#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
120 (IS_E1H_OFFSET ? (0x33a0 + (port * 0x1a0) + (client_id * 0x10)) \
121 : (0x9c0 + (port * 0x120) + (client_id * 0x10)))
122#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET \
123 (IS_E1H_OFFSET ? 0x1ed8 : 0xffffffff)
124#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ 99#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
125 (IS_E1H_OFFSET ? 0x1eda : 0xffffffff) 100 (IRO[105].base)
126#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 101#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
127 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \ 102 (IRO[96].base + ((pfId) * IRO[96].m1))
128 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ 103#define TSTORM_FUNC_EN_OFFSET(funcId) \
129 0x28) + (index * 0x4))) 104 (IRO[101].base + ((funcId) * IRO[101].m1))
130#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 105#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
131 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \ 106 (IRO[195].base + ((pfId) * IRO[195].m1))
132 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) 107#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base)
133#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 108#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \
134 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \ 109 (IRO[91].base + ((pfId) * IRO[91].m1))
135 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) 110#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size)
136#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 111 #define \
137 (IS_E1H_OFFSET ? (0x2940 + (function * 0x8)) : (0x4928 + \ 112 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \
138 (function * 0x8))) 113 (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \
139#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ 114 * IRO[260].m2))
140 (IS_E1H_OFFSET ? (0x3000 + (function * 0x40)) : (0x1500 + \ 115#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
141 (function * 0x40))) 116 (IRO[264].base + ((pfId) * IRO[264].m1))
142#define TSTORM_FUNCTION_MODE_OFFSET \ 117#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
143 (IS_E1H_OFFSET ? 0x1ed0 : 0xffffffff) 118 (IRO[265].base + ((pfId) * IRO[265].m1))
144#define TSTORM_HC_BTR_OFFSET(port) \ 119#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
145 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 120 (IRO[266].base + ((pfId) * IRO[266].m1))
146#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ 121#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
147 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \ 122 (IRO[267].base + ((pfId) * IRO[267].m1))
148 (function * 0x80))) 123#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
149#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 124 (IRO[263].base + ((pfId) * IRO[263].m1))
150#define TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(function, pblEntry) \ 125#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
151 (IS_E1H_OFFSET ? (0x60c0 + (function * 0x40) + (pblEntry * 0x8)) \ 126 (IRO[262].base + ((pfId) * IRO[262].m1))
152 : (0x4c30 + (function * 0x40) + (pblEntry * 0x8))) 127#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
153#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ 128 (IRO[261].base + ((pfId) * IRO[261].m1))
154 (IS_E1H_OFFSET ? (0x6340 + (function * 0x8)) : (0x4cd0 + \ 129#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
155 (function * 0x8))) 130 (IRO[259].base + ((pfId) * IRO[259].m1))
156#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 131#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
157 (IS_E1H_OFFSET ? (0x6004 + (function * 0x8)) : (0x4c04 + \ 132 (IRO[269].base + ((pfId) * IRO[269].m1))
158 (function * 0x8))) 133#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
159#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 134 (IRO[256].base + ((pfId) * IRO[256].m1))
160 (IS_E1H_OFFSET ? (0x6002 + (function * 0x8)) : (0x4c02 + \ 135#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
161 (function * 0x8))) 136 (IRO[257].base + ((pfId) * IRO[257].m1))
162#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 137#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
163 (IS_E1H_OFFSET ? (0x6000 + (function * 0x8)) : (0x4c00 + \ 138 (IRO[258].base + ((pfId) * IRO[258].m1))
164 (function * 0x8))) 139#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
165#define TSTORM_ISCSI_RQ_SIZE_OFFSET(function) \ 140 (IRO[196].base + ((pfId) * IRO[196].m1))
166 (IS_E1H_OFFSET ? (0x6080 + (function * 0x8)) : (0x4c20 + \ 141 #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \
167 (function * 0x8))) 142 (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \
168#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \ 143 IRO[100].m2))
169 (IS_E1H_OFFSET ? (0x6040 + (function * 0x8)) : (0x4c10 + \ 144#define TSTORM_STATS_FLAGS_OFFSET(pfId) \
170 (function * 0x8))) 145 (IRO[95].base + ((pfId) * IRO[95].m1))
171#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(function) \ 146#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
172 (IS_E1H_OFFSET ? (0x6042 + (function * 0x8)) : (0x4c12 + \ 147 (IRO[211].base + ((pfId) * IRO[211].m1))
173 (function * 0x8))) 148#define TSTORM_VF_TO_PF_OFFSET(funcId) \
174#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(function) \ 149 (IRO[102].base + ((funcId) * IRO[102].m1))
175 (IS_E1H_OFFSET ? (0x6044 + (function * 0x8)) : (0x4c14 + \ 150#define USTORM_AGG_DATA_OFFSET (IRO[201].base)
176 (function * 0x8))) 151#define USTORM_AGG_DATA_SIZE (IRO[201].size)
177#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ 152#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base)
178 (IS_E1H_OFFSET ? (0x3008 + (function * 0x40)) : (0x1508 + \ 153#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
179 (function * 0x40))) 154 (IRO[169].base + ((assertListEntry) * IRO[169].m1))
180#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ 155#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
181 (IS_E1H_OFFSET ? (0x2010 + (port * 0x490) + (stats_counter_id * \ 156 (IRO[178].base + ((portId) * IRO[178].m1))
182 0x40)) : (0x4010 + (port * 0x490) + (stats_counter_id * 0x40))) 157#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
183#define TSTORM_STATS_FLAGS_OFFSET(function) \ 158 (IRO[172].base + ((pfId) * IRO[172].m1))
184 (IS_E1H_OFFSET ? (0x29c0 + (function * 0x8)) : (0x4948 + \ 159#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
185 (function * 0x8))) 160 (IRO[313].base + ((pfId) * IRO[313].m1))
186#define TSTORM_TCP_MAX_CWND_OFFSET(function) \ 161#define USTORM_FUNC_EN_OFFSET(funcId) \
187 (IS_E1H_OFFSET ? (0x4004 + (function * 0x8)) : (0x1fb4 + \ 162 (IRO[174].base + ((funcId) * IRO[174].m1))
188 (function * 0x8))) 163#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base)
189#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa000 : 0x3000) 164#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
190#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2000 : 0x1000) 165 (IRO[277].base + ((pfId) * IRO[277].m1))
191#define USTORM_ASSERT_LIST_INDEX_OFFSET \ 166#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
192 (IS_E1H_OFFSET ? 0x8000 : 0x1000) 167 (IRO[278].base + ((pfId) * IRO[278].m1))
193#define USTORM_ASSERT_LIST_OFFSET(idx) \ 168#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
194 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 169 (IRO[282].base + ((pfId) * IRO[282].m1))
195#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ 170#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
196 (IS_E1H_OFFSET ? (0x1010 + (port * 0x680) + (clientId * 0x40)) : \ 171 (IRO[279].base + ((pfId) * IRO[279].m1))
197 (0x4010 + (port * 0x360) + (clientId * 0x30))) 172#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
198#define USTORM_CQE_PAGE_NEXT_OFFSET(port, clientId) \ 173 (IRO[275].base + ((pfId) * IRO[275].m1))
199 (IS_E1H_OFFSET ? (0x1028 + (port * 0x680) + (clientId * 0x40)) : \ 174#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
200 (0x4028 + (port * 0x360) + (clientId * 0x30))) 175 (IRO[274].base + ((pfId) * IRO[274].m1))
201#define USTORM_ETH_PAUSE_ENABLED_OFFSET(port) \ 176#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
202 (IS_E1H_OFFSET ? (0x2ad4 + (port * 0x8)) : 0xffffffff) 177 (IRO[273].base + ((pfId) * IRO[273].m1))
203#define USTORM_ETH_RING_PAUSE_DATA_OFFSET(port, clientId) \ 178#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
204 (IS_E1H_OFFSET ? (0x1030 + (port * 0x680) + (clientId * 0x40)) : \ 179 (IRO[276].base + ((pfId) * IRO[276].m1))
205 0xffffffff) 180#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
206#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 181 (IRO[280].base + ((pfId) * IRO[280].m1))
207 (IS_E1H_OFFSET ? (0x2a50 + (function * 0x8)) : (0x1dd0 + \ 182#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
208 (function * 0x8))) 183 (IRO[281].base + ((pfId) * IRO[281].m1))
209#define USTORM_FUNCTION_MODE_OFFSET \ 184#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
210 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff) 185 (IRO[176].base + ((pfId) * IRO[176].m1))
211#define USTORM_ISCSI_CQ_SIZE_OFFSET(function) \ 186 #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \
212 (IS_E1H_OFFSET ? (0x7044 + (function * 0x8)) : (0x2414 + \ 187 (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \
213 (function * 0x8))) 188 IRO[173].m2))
214#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(function) \ 189 #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
215 (IS_E1H_OFFSET ? (0x7046 + (function * 0x8)) : (0x2416 + \ 190 (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \
216 (function * 0x8))) 191 IRO[204].m2))
217#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(function) \ 192#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
218 (IS_E1H_OFFSET ? (0x7688 + (function * 0x8)) : (0x29c8 + \ 193 (IRO[205].base + ((qzoneId) * IRO[205].m1))
219 (function * 0x8))) 194#define USTORM_STATS_FLAGS_OFFSET(pfId) \
220#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(function) \ 195 (IRO[171].base + ((pfId) * IRO[171].m1))
221 (IS_E1H_OFFSET ? (0x7648 + (function * 0x8)) : (0x29b8 + \ 196#define USTORM_TPA_BTR_OFFSET (IRO[202].base)
222 (function * 0x8))) 197#define USTORM_TPA_BTR_SIZE (IRO[202].size)
223#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \ 198#define USTORM_VF_TO_PF_OFFSET(funcId) \
224 (IS_E1H_OFFSET ? (0x7004 + (function * 0x8)) : (0x2404 + \ 199 (IRO[175].base + ((funcId) * IRO[175].m1))
225 (function * 0x8))) 200#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base)
226#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \ 201#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base)
227 (IS_E1H_OFFSET ? (0x7002 + (function * 0x8)) : (0x2402 + \ 202#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base)
228 (function * 0x8))) 203#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
229#define USTORM_ISCSI_PAGE_SIZE_OFFSET(function) \ 204 (IRO[53].base + ((assertListEntry) * IRO[53].m1))
230 (IS_E1H_OFFSET ? (0x7000 + (function * 0x8)) : (0x2400 + \ 205#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
231 (function * 0x8))) 206 (IRO[47].base + ((portId) * IRO[47].m1))
232#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \ 207#define XSTORM_E1HOV_OFFSET(pfId) \
233 (IS_E1H_OFFSET ? (0x7040 + (function * 0x8)) : (0x2410 + \ 208 (IRO[55].base + ((pfId) * IRO[55].m1))
234 (function * 0x8))) 209#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \
235#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(function) \ 210 (IRO[45].base + ((pfId) * IRO[45].m1))
236 (IS_E1H_OFFSET ? (0x7080 + (function * 0x8)) : (0x2420 + \ 211#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
237 (function * 0x8))) 212 (IRO[49].base + ((pfId) * IRO[49].m1))
238#define USTORM_ISCSI_RQ_SIZE_OFFSET(function) \ 213#define XSTORM_FUNC_EN_OFFSET(funcId) \
239 (IS_E1H_OFFSET ? (0x7084 + (function * 0x8)) : (0x2424 + \ 214 (IRO[51].base + ((funcId) * IRO[51].m1))
240 (function * 0x8))) 215#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base)
241#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ 216#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
242 (IS_E1H_OFFSET ? (0x1018 + (port * 0x680) + (clientId * 0x40)) : \ 217 (IRO[290].base + ((pfId) * IRO[290].m1))
243 (0x4018 + (port * 0x360) + (clientId * 0x30))) 218#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
244#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ 219 (IRO[293].base + ((pfId) * IRO[293].m1))
245 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x1da8 + \ 220#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
246 (function * 0x8))) 221 (IRO[294].base + ((pfId) * IRO[294].m1))
247#define USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \ 222#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
248 (IS_E1H_OFFSET ? (0x2450 + (port * 0x2d0) + (stats_counter_id * \ 223 (IRO[295].base + ((pfId) * IRO[295].m1))
249 0x28)) : (0x1500 + (port * 0x2d0) + (stats_counter_id * 0x28))) 224#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
250#define USTORM_RX_PRODS_OFFSET(port, client_id) \ 225 (IRO[296].base + ((pfId) * IRO[296].m1))
251 (IS_E1H_OFFSET ? (0x1000 + (port * 0x680) + (client_id * 0x40)) \ 226#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
252 : (0x4000 + (port * 0x360) + (client_id * 0x30))) 227 (IRO[297].base + ((pfId) * IRO[297].m1))
253#define USTORM_STATS_FLAGS_OFFSET(function) \ 228#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
254 (IS_E1H_OFFSET ? (0x29f0 + (function * 0x8)) : (0x1db8 + \ 229 (IRO[298].base + ((pfId) * IRO[298].m1))
255 (function * 0x8))) 230#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
256#define USTORM_TPA_BTR_OFFSET (IS_E1H_OFFSET ? 0x3da5 : 0x5095) 231 (IRO[299].base + ((pfId) * IRO[299].m1))
257#define USTORM_TPA_BTR_SIZE 0x1 232#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
258#define XSTORM_ASSERT_LIST_INDEX_OFFSET \ 233 (IRO[289].base + ((pfId) * IRO[289].m1))
259 (IS_E1H_OFFSET ? 0x9000 : 0x1000) 234#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
260#define XSTORM_ASSERT_LIST_OFFSET(idx) \ 235 (IRO[288].base + ((pfId) * IRO[288].m1))
261 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 236#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
262#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ 237 (IRO[287].base + ((pfId) * IRO[287].m1))
263 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x50)) : (0x3a80 + (port * 0x50))) 238#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
264#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 239 (IRO[292].base + ((pfId) * IRO[292].m1))
265 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \ 240#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
266 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \ 241 (IRO[291].base + ((pfId) * IRO[291].m1))
267 0x28) + (index * 0x4))) 242#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
268#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 243 (IRO[286].base + ((pfId) * IRO[286].m1))
269 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \ 244#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
270 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28))) 245 (IRO[285].base + ((pfId) * IRO[285].m1))
271#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 246#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
272 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \ 247 (IRO[284].base + ((pfId) * IRO[284].m1))
273 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28))) 248#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
274#define XSTORM_E1HOV_OFFSET(function) \ 249 (IRO[283].base + ((pfId) * IRO[283].m1))
275 (IS_E1H_OFFSET ? (0x2c10 + (function * 0x8)) : 0xffffffff) 250#define XSTORM_PATH_ID_OFFSET (IRO[65].base)
276#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 251 #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \
277 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3a50 + \ 252 (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \
278 (function * 0x8))) 253 IRO[50].m2))
279#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ 254#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
280 (IS_E1H_OFFSET ? (0x2588 + (function * 0x90)) : (0x3b60 + \ 255 (IRO[48].base + ((pfId) * IRO[48].m1))
281 (function * 0x90))) 256#define XSTORM_SPQ_DATA_OFFSET(funcId) \
282#define XSTORM_FUNCTION_MODE_OFFSET \ 257 (IRO[32].base + ((funcId) * IRO[32].m1))
283 (IS_E1H_OFFSET ? 0x2c50 : 0xffffffff) 258#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
284#define XSTORM_HC_BTR_OFFSET(port) \ 259#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
285 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 260 (IRO[30].base + ((funcId) * IRO[30].m1))
286#define XSTORM_ISCSI_HQ_SIZE_OFFSET(function) \ 261#define XSTORM_SPQ_PROD_OFFSET(funcId) \
287 (IS_E1H_OFFSET ? (0x80c0 + (function * 0x8)) : (0x1c30 + \ 262 (IRO[31].base + ((funcId) * IRO[31].m1))
288 (function * 0x8))) 263#define XSTORM_STATS_FLAGS_OFFSET(pfId) \
289#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(function) \ 264 (IRO[43].base + ((pfId) * IRO[43].m1))
290 (IS_E1H_OFFSET ? (0x8080 + (function * 0x8)) : (0x1c20 + \ 265#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
291 (function * 0x8))) 266 (IRO[206].base + ((portId) * IRO[206].m1))
292#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(function) \ 267#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
293 (IS_E1H_OFFSET ? (0x8081 + (function * 0x8)) : (0x1c21 + \ 268 (IRO[207].base + ((portId) * IRO[207].m1))
294 (function * 0x8))) 269#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
295#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(function) \ 270 (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \
296 (IS_E1H_OFFSET ? (0x8082 + (function * 0x8)) : (0x1c22 + \ 271 IRO[209].m2))
297 (function * 0x8))) 272#define XSTORM_VF_TO_PF_OFFSET(funcId) \
298#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(function) \ 273 (IRO[52].base + ((funcId) * IRO[52].m1))
299 (IS_E1H_OFFSET ? (0x8083 + (function * 0x8)) : (0x1c23 + \
300 (function * 0x8)))
301#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(function) \
302 (IS_E1H_OFFSET ? (0x8084 + (function * 0x8)) : (0x1c24 + \
303 (function * 0x8)))
304#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(function) \
305 (IS_E1H_OFFSET ? (0x8085 + (function * 0x8)) : (0x1c25 + \
306 (function * 0x8)))
307#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(function) \
308 (IS_E1H_OFFSET ? (0x8086 + (function * 0x8)) : (0x1c26 + \
309 (function * 0x8)))
310#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(function) \
311 (IS_E1H_OFFSET ? (0x8004 + (function * 0x8)) : (0x1c04 + \
312 (function * 0x8)))
313#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(function) \
314 (IS_E1H_OFFSET ? (0x8002 + (function * 0x8)) : (0x1c02 + \
315 (function * 0x8)))
316#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(function) \
317 (IS_E1H_OFFSET ? (0x8000 + (function * 0x8)) : (0x1c00 + \
318 (function * 0x8)))
319#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(function) \
320 (IS_E1H_OFFSET ? (0x80c4 + (function * 0x8)) : (0x1c34 + \
321 (function * 0x8)))
322#define XSTORM_ISCSI_SQ_SIZE_OFFSET(function) \
323 (IS_E1H_OFFSET ? (0x80c2 + (function * 0x8)) : (0x1c32 + \
324 (function * 0x8)))
325#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(function) \
326 (IS_E1H_OFFSET ? (0x8043 + (function * 0x8)) : (0x1c13 + \
327 (function * 0x8)))
328#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(function) \
329 (IS_E1H_OFFSET ? (0x8042 + (function * 0x8)) : (0x1c12 + \
330 (function * 0x8)))
331#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(function) \
332 (IS_E1H_OFFSET ? (0x8041 + (function * 0x8)) : (0x1c11 + \
333 (function * 0x8)))
334#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(function) \
335 (IS_E1H_OFFSET ? (0x8040 + (function * 0x8)) : (0x1c10 + \
336 (function * 0x8)))
337#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
338 (IS_E1H_OFFSET ? (0xc000 + (port * 0x360) + (stats_counter_id * \
339 0x30)) : (0x3378 + (port * 0x360) + (stats_counter_id * 0x30)))
340#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
341 (IS_E1H_OFFSET ? (0x2548 + (function * 0x90)) : (0x3b20 + \
342 (function * 0x90)))
343#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
344 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
345 (function * 0x10)))
346#define XSTORM_SPQ_PROD_OFFSET(function) \
347 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
348 (function * 0x10)))
349#define XSTORM_STATS_FLAGS_OFFSET(function) \
350 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3a40 + \
351 (function * 0x8)))
352#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port) \
353 (IS_E1H_OFFSET ? (0x4000 + (port * 0x8)) : (0x1960 + (port * 0x8)))
354#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port) \
355 (IS_E1H_OFFSET ? (0x4001 + (port * 0x8)) : (0x1961 + (port * 0x8)))
356#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(function) \
357 (IS_E1H_OFFSET ? (0x4060 + ((function>>1) * 0x8) + ((function&1) \
358 * 0x4)) : (0x1978 + (function * 0x4)))
359#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 274#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
360 275
361/**
362* This file defines HSI constants for the ETH flow
363*/
364#ifdef _EVEREST_MICROCODE
365#include "microcode_constants.h"
366#include "eth_rx_bd.h"
367#include "eth_tx_bd.h"
368#include "eth_rx_cqe.h"
369#include "eth_rx_sge.h"
370#include "eth_rx_cqe_next_page.h"
371#endif
372
373/* RSS hash types */ 276/* RSS hash types */
374#define DEFAULT_HASH_TYPE 0 277#define DEFAULT_HASH_TYPE 0
375#define IPV4_HASH_TYPE 1 278#define IPV4_HASH_TYPE 1
@@ -389,11 +292,17 @@
389#define U_ETH_NUM_OF_SGES_TO_FETCH 8 292#define U_ETH_NUM_OF_SGES_TO_FETCH 8
390#define U_ETH_MAX_SGES_FOR_PACKET 3 293#define U_ETH_MAX_SGES_FOR_PACKET 3
391 294
295/*Tx params*/
296#define X_ETH_NO_VLAN 0
297#define X_ETH_OUTBAND_VLAN 1
298#define X_ETH_INBAND_VLAN 2
392/* Rx ring params */ 299/* Rx ring params */
393#define U_ETH_LOCAL_BD_RING_SIZE 8 300#define U_ETH_LOCAL_BD_RING_SIZE 8
394#define U_ETH_LOCAL_SGE_RING_SIZE 10 301#define U_ETH_LOCAL_SGE_RING_SIZE 10
395#define U_ETH_SGL_SIZE 8 302#define U_ETH_SGL_SIZE 8
396 303 /* The fw will padd the buffer with this value, so the IP header \
304 will be align to 4 Byte */
305#define IP_HEADER_ALIGNMENT_PADDING 2
397 306
398#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \ 307#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
399 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1)) 308 (0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
@@ -409,16 +318,15 @@
409#define U_ETH_UNDEFINED_Q 0xFF 318#define U_ETH_UNDEFINED_Q 0xFF
410 319
411/* values of command IDs in the ramrod message */ 320/* values of command IDs in the ramrod message */
412#define RAMROD_CMD_ID_ETH_PORT_SETUP 80 321#define RAMROD_CMD_ID_ETH_UNUSED 0
413#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 85 322#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1
414#define RAMROD_CMD_ID_ETH_STAT_QUERY 90 323#define RAMROD_CMD_ID_ETH_UPDATE 2
415#define RAMROD_CMD_ID_ETH_UPDATE 100 324#define RAMROD_CMD_ID_ETH_HALT 3
416#define RAMROD_CMD_ID_ETH_HALT 105 325#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4
417#define RAMROD_CMD_ID_ETH_SET_MAC 110 326#define RAMROD_CMD_ID_ETH_ACTIVATE 5
418#define RAMROD_CMD_ID_ETH_CFC_DEL 115 327#define RAMROD_CMD_ID_ETH_DEACTIVATE 6
419#define RAMROD_CMD_ID_ETH_PORT_DEL 120 328#define RAMROD_CMD_ID_ETH_EMPTY 7
420#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 125 329#define RAMROD_CMD_ID_ETH_TERMINATE 8
421
422 330
423/* command values for set mac command */ 331/* command values for set mac command */
424#define T_ETH_MAC_COMMAND_SET 0 332#define T_ETH_MAC_COMMAND_SET 0
@@ -431,7 +339,9 @@
431 339
432/* Maximal L2 clients supported */ 340/* Maximal L2 clients supported */
433#define ETH_MAX_RX_CLIENTS_E1 18 341#define ETH_MAX_RX_CLIENTS_E1 18
434#define ETH_MAX_RX_CLIENTS_E1H 26 342#define ETH_MAX_RX_CLIENTS_E1H 28
343
344#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H
435 345
436/* Maximal aggregation queues supported */ 346/* Maximal aggregation queues supported */
437#define ETH_MAX_AGGREGATION_QUEUES_E1 32 347#define ETH_MAX_AGGREGATION_QUEUES_E1 32
@@ -443,6 +353,20 @@
443#define ETH_RSS_MODE_VLAN_PRI 2 353#define ETH_RSS_MODE_VLAN_PRI 2
444#define ETH_RSS_MODE_E1HOV_PRI 3 354#define ETH_RSS_MODE_E1HOV_PRI 3
445#define ETH_RSS_MODE_IP_DSCP 4 355#define ETH_RSS_MODE_IP_DSCP 4
356#define ETH_RSS_MODE_E2_INTEG 5
357
358
359/* ETH vlan filtering modes */
360#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */
361#define ETH_VLAN_FILTER_SPECIFIC_VLAN \
362 1 /* Only the vlan_id is allowed */
363#define ETH_VLAN_FILTER_CLASSIFY \
364 2 /* vlan will be added to CAM for classification */
365
366/* Fast path CQE selection */
367#define ETH_FP_CQE_REGULAR 0
368#define ETH_FP_CQE_SGL 1
369#define ETH_FP_CQE_RAW 2
446 370
447 371
448/** 372/**
@@ -458,6 +382,7 @@
458#define RESERVED_CONNECTION_TYPE_0 5 382#define RESERVED_CONNECTION_TYPE_0 5
459#define RESERVED_CONNECTION_TYPE_1 6 383#define RESERVED_CONNECTION_TYPE_1 6
460#define RESERVED_CONNECTION_TYPE_2 7 384#define RESERVED_CONNECTION_TYPE_2 7
385#define NONE_CONNECTION_TYPE 8
461 386
462 387
463#define PROTOCOL_STATE_BIT_OFFSET 6 388#define PROTOCOL_STATE_BIT_OFFSET 6
@@ -466,6 +391,16 @@
466#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) 391#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
467#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) 392#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
468 393
394/* values of command IDs in the ramrod message */
395#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1
396#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2
397#define RAMROD_CMD_ID_COMMON_CFC_DEL 3
398#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4
399#define RAMROD_CMD_ID_COMMON_SET_MAC 5
400#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6
401#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7
402#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8
403
469/* microcode fixed page page size 4K (chains and ring segments) */ 404/* microcode fixed page page size 4K (chains and ring segments) */
470#define MC_PAGE_SIZE 4096 405#define MC_PAGE_SIZE 4096
471 406
@@ -473,46 +408,26 @@
473/* Host coalescing constants */ 408/* Host coalescing constants */
474#define HC_IGU_BC_MODE 0 409#define HC_IGU_BC_MODE 0
475#define HC_IGU_NBC_MODE 1 410#define HC_IGU_NBC_MODE 1
411/* Host coalescing constants. E1 includes E1H as well */
412
413/* Number of indices per slow-path SB */
414#define HC_SP_SB_MAX_INDICES 16
415
416/* Number of indices per SB */
417#define HC_SB_MAX_INDICES_E1X 8
418#define HC_SB_MAX_INDICES_E2 8
419
420#define HC_SB_MAX_SB_E1X 32
421#define HC_SB_MAX_SB_E2 136
422
423#define HC_SP_SB_ID 0xde
476 424
477#define HC_REGULAR_SEGMENT 0 425#define HC_REGULAR_SEGMENT 0
478#define HC_DEFAULT_SEGMENT 1 426#define HC_DEFAULT_SEGMENT 1
427#define HC_SB_MAX_SM 2
479 428
480/* index numbers */ 429#define HC_SB_MAX_DYNAMIC_INDICES 4
481#define HC_USTORM_DEF_SB_NUM_INDICES 8 430#define HC_FUNCTION_DISABLED 0xff
482#define HC_CSTORM_DEF_SB_NUM_INDICES 8
483#define HC_XSTORM_DEF_SB_NUM_INDICES 4
484#define HC_TSTORM_DEF_SB_NUM_INDICES 4
485#define HC_USTORM_SB_NUM_INDICES 4
486#define HC_CSTORM_SB_NUM_INDICES 4
487
488/* index values - which counter to update */
489
490#define HC_INDEX_U_TOE_RX_CQ_CONS 0
491#define HC_INDEX_U_ETH_RX_CQ_CONS 1
492#define HC_INDEX_U_ETH_RX_BD_CONS 2
493#define HC_INDEX_U_FCOE_EQ_CONS 3
494
495#define HC_INDEX_C_TOE_TX_CQ_CONS 0
496#define HC_INDEX_C_ETH_TX_CQ_CONS 1
497#define HC_INDEX_C_ISCSI_EQ_CONS 2
498
499#define HC_INDEX_DEF_X_SPQ_CONS 0
500
501#define HC_INDEX_DEF_C_RDMA_EQ_CONS 0
502#define HC_INDEX_DEF_C_RDMA_NAL_PROD 1
503#define HC_INDEX_DEF_C_ETH_FW_TX_CQ_CONS 2
504#define HC_INDEX_DEF_C_ETH_SLOW_PATH 3
505#define HC_INDEX_DEF_C_ETH_RDMA_CQ_CONS 4
506#define HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS 5
507#define HC_INDEX_DEF_C_ETH_FCOE_CQ_CONS 6
508
509#define HC_INDEX_DEF_U_ETH_RDMA_RX_CQ_CONS 0
510#define HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS 1
511#define HC_INDEX_DEF_U_ETH_RDMA_RX_BD_CONS 2
512#define HC_INDEX_DEF_U_ETH_ISCSI_RX_BD_CONS 3
513#define HC_INDEX_DEF_U_ETH_FCOE_RX_CQ_CONS 4
514#define HC_INDEX_DEF_U_ETH_FCOE_RX_BD_CONS 5
515
516/* used by the driver to get the SB offset */ 431/* used by the driver to get the SB offset */
517#define USTORM_ID 0 432#define USTORM_ID 0
518#define CSTORM_ID 1 433#define CSTORM_ID 1
@@ -529,45 +444,17 @@
529 444
530 445
531/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 446/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
532#define EMULATION_FREQUENCY_FACTOR 1600
533#define FPGA_FREQUENCY_FACTOR 100
534 447
535#define TIMERS_TICK_SIZE_CHIP (1e-3) 448#define TIMERS_TICK_SIZE_CHIP (1e-3)
536#define TIMERS_TICK_SIZE_EMUL \
537 ((TIMERS_TICK_SIZE_CHIP)/((EMULATION_FREQUENCY_FACTOR)))
538#define TIMERS_TICK_SIZE_FPGA \
539 ((TIMERS_TICK_SIZE_CHIP)/((FPGA_FREQUENCY_FACTOR)))
540 449
541#define TSEMI_CLK1_RESUL_CHIP (1e-3) 450#define TSEMI_CLK1_RESUL_CHIP (1e-3)
542#define TSEMI_CLK1_RESUL_EMUL \
543 ((TSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
544#define TSEMI_CLK1_RESUL_FPGA \
545 ((TSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
546
547#define USEMI_CLK1_RESUL_CHIP (TIMERS_TICK_SIZE_CHIP)
548#define USEMI_CLK1_RESUL_EMUL (TIMERS_TICK_SIZE_EMUL)
549#define USEMI_CLK1_RESUL_FPGA (TIMERS_TICK_SIZE_FPGA)
550 451
551#define XSEMI_CLK1_RESUL_CHIP (1e-3) 452#define XSEMI_CLK1_RESUL_CHIP (1e-3)
552#define XSEMI_CLK1_RESUL_EMUL \
553 ((XSEMI_CLK1_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
554#define XSEMI_CLK1_RESUL_FPGA \
555 ((XSEMI_CLK1_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
556
557#define XSEMI_CLK2_RESUL_CHIP (1e-6)
558#define XSEMI_CLK2_RESUL_EMUL \
559 ((XSEMI_CLK2_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
560#define XSEMI_CLK2_RESUL_FPGA \
561 ((XSEMI_CLK2_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
562 453
563#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) 454#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6))
564#define SDM_TIMER_TICK_RESUL_EMUL \
565 ((SDM_TIMER_TICK_RESUL_CHIP)/(EMULATION_FREQUENCY_FACTOR))
566#define SDM_TIMER_TICK_RESUL_FPGA \
567 ((SDM_TIMER_TICK_RESUL_CHIP)/(FPGA_FREQUENCY_FACTOR))
568
569 455
570/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ 456/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
457
571#define XSTORM_IP_ID_ROLL_HALF 0x8000 458#define XSTORM_IP_ID_ROLL_HALF 0x8000
572#define XSTORM_IP_ID_ROLL_ALL 0 459#define XSTORM_IP_ID_ROLL_ALL 0
573 460
@@ -576,10 +463,36 @@
576#define NUM_OF_PROTOCOLS 4 463#define NUM_OF_PROTOCOLS 4
577#define NUM_OF_SAFC_BITS 16 464#define NUM_OF_SAFC_BITS 16
578#define MAX_COS_NUMBER 4 465#define MAX_COS_NUMBER 4
579#define MAX_T_STAT_COUNTER_ID 18
580#define MAX_X_STAT_COUNTER_ID 18
581#define MAX_U_STAT_COUNTER_ID 18
582 466
467#define FAIRNESS_COS_WRR_MODE 0
468#define FAIRNESS_COS_ETS_MODE 1
469
470
471/* Priority Flow Control (PFC) */
472#define MAX_PFC_PRIORITIES 8
473#define MAX_PFC_TRAFFIC_TYPES 8
474
475/* Available Traffic Types for Link Layer Flow Control */
476#define LLFC_TRAFFIC_TYPE_NW 0
477#define LLFC_TRAFFIC_TYPE_FCOE 1
478#define LLFC_TRAFFIC_TYPE_ISCSI 2
479 /***************** START OF E2 INTEGRATION \
480 CODE***************************************/
481#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3
482 /***************** END OF E2 INTEGRATION \
483 CODE***************************************/
484#define LLFC_TRAFFIC_TYPE_MAX 4
485
486 /* used by array traffic_type_to_priority[] to mark traffic type \
487 that is not mapped to priority*/
488#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
489
490#define LLFC_MODE_NONE 0
491#define LLFC_MODE_PFC 1
492#define LLFC_MODE_SAFC 2
493
494#define DCB_DISABLED 0
495#define DCB_ENABLED 1
583 496
584#define UNKNOWN_ADDRESS 0 497#define UNKNOWN_ADDRESS 0
585#define UNICAST_ADDRESS 1 498#define UNICAST_ADDRESS 1
@@ -587,8 +500,32 @@
587#define BROADCAST_ADDRESS 3 500#define BROADCAST_ADDRESS 3
588 501
589#define SINGLE_FUNCTION 0 502#define SINGLE_FUNCTION 0
590#define MULTI_FUNCTION 1 503#define MULTI_FUNCTION_SD 1
504#define MULTI_FUNCTION_SI 2
591 505
592#define IP_V4 0 506#define IP_V4 0
593#define IP_V6 1 507#define IP_V6 1
594 508
509
510#define C_ERES_PER_PAGE \
511 (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
512#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
513
514#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0
515#define EVENT_RING_OPCODE_FUNCTION_START 1
516#define EVENT_RING_OPCODE_FUNCTION_STOP 2
517#define EVENT_RING_OPCODE_CFC_DEL 3
518#define EVENT_RING_OPCODE_CFC_DEL_WB 4
519#define EVENT_RING_OPCODE_SET_MAC 5
520#define EVENT_RING_OPCODE_STAT_QUERY 6
521#define EVENT_RING_OPCODE_STOP_TRAFFIC 7
522#define EVENT_RING_OPCODE_START_TRAFFIC 8
523#define EVENT_RING_OPCODE_FORWARD_SETUP 9
524
525#define VF_PF_CHANNEL_STATE_READY 0
526#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1
527
528#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2
529
530
531#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index 3f5ee5d7cc2a..f807262911e5 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -31,6 +31,7 @@ struct bnx2x_fw_file_hdr {
31 struct bnx2x_fw_file_section csem_pram_data; 31 struct bnx2x_fw_file_section csem_pram_data;
32 struct bnx2x_fw_file_section xsem_int_table_data; 32 struct bnx2x_fw_file_section xsem_int_table_data;
33 struct bnx2x_fw_file_section xsem_pram_data; 33 struct bnx2x_fw_file_section xsem_pram_data;
34 struct bnx2x_fw_file_section iro_arr;
34 struct bnx2x_fw_file_section fw_version; 35 struct bnx2x_fw_file_section fw_version;
35}; 36};
36 37
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 60d141cd9950..18c8e23a0e82 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -6,6 +6,10 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9#ifndef BNX2X_HSI_H
10#define BNX2X_HSI_H
11
12#include "bnx2x_fw_defs.h"
9 13
10struct license_key { 14struct license_key {
11 u32 reserved[6]; 15 u32 reserved[6];
@@ -326,6 +330,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
326 u32 lane_config; 330 u32 lane_config;
327#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff 331#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff
328#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 332#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0
333
329#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff 334#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff
330#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 335#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0
331#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 336#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00
@@ -658,6 +663,7 @@ struct shm_dev_info { /* size */
658#define FUNC_7 7 663#define FUNC_7 7
659#define E1_FUNC_MAX 2 664#define E1_FUNC_MAX 2
660#define E1H_FUNC_MAX 8 665#define E1H_FUNC_MAX 8
666#define E2_FUNC_MAX 4 /* per path */
661 667
662#define VN_0 0 668#define VN_0 0
663#define VN_1 1 669#define VN_1 1
@@ -816,6 +822,9 @@ struct drv_func_mb {
816#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 822#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
817#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 823#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
818#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 824#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
825 /* Load common chip is supported from bc 6.0.0 */
826#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000
827#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000
819#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 828#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
820#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 829#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
821#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 830#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
@@ -1016,11 +1025,22 @@ struct shmem_region { /* SharedMem Offset (size) */
1016 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ 1025 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
1017 1026
1018 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ 1027 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
1019 struct drv_func_mb func_mb[E1H_FUNC_MAX]; 1028 struct drv_func_mb func_mb[]; /* 0x684
1029 (44*2/4/8=0x58/0xb0/0x160) */
1020 1030
1021 struct mf_cfg mf_cfg; 1031}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
1022 1032
1023}; /* 0x6dc */ 1033struct fw_flr_ack {
1034 u32 pf_ack;
1035 u32 vf_ack[1];
1036 u32 iov_dis_ack;
1037};
1038
1039struct fw_flr_mb {
1040 u32 aggint;
1041 u32 opgen_addr;
1042 struct fw_flr_ack ack;
1043};
1024 1044
1025 1045
1026struct shmem2_region { 1046struct shmem2_region {
@@ -1040,7 +1060,20 @@ struct shmem2_region {
1040 * For backwards compatibility, if the mf_cfg_addr does not exist 1060 * For backwards compatibility, if the mf_cfg_addr does not exist
1041 * (the size filed is smaller than 0xc) the mf_cfg resides at the 1061 * (the size filed is smaller than 0xc) the mf_cfg resides at the
1042 * end of struct shmem_region 1062 * end of struct shmem_region
1063 */
1064 u32 mf_cfg_addr;
1065#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
1066
1067 struct fw_flr_mb flr_mb;
1068 u32 reserved[3];
1069 /*
1070 * The other shmemX_base_addr holds the other path's shmem address
1071 * required for example in case of common phy init, or for path1 to know
1072 * the address of mcp debug trace which is located in offset from shmem
1073 * of path0
1043 */ 1074 */
1075 u32 other_shmem_base_addr;
1076 u32 other_shmem2_base_addr;
1044}; 1077};
1045 1078
1046 1079
@@ -1096,7 +1129,7 @@ struct emac_stats {
1096}; 1129};
1097 1130
1098 1131
1099struct bmac_stats { 1132struct bmac1_stats {
1100 u32 tx_stat_gtpkt_lo; 1133 u32 tx_stat_gtpkt_lo;
1101 u32 tx_stat_gtpkt_hi; 1134 u32 tx_stat_gtpkt_hi;
1102 u32 tx_stat_gtxpf_lo; 1135 u32 tx_stat_gtxpf_lo;
@@ -1200,10 +1233,126 @@ struct bmac_stats {
1200 u32 rx_stat_gripj_hi; 1233 u32 rx_stat_gripj_hi;
1201}; 1234};
1202 1235
1236struct bmac2_stats {
1237 u32 tx_stat_gtpk_lo; /* gtpok */
1238 u32 tx_stat_gtpk_hi; /* gtpok */
1239 u32 tx_stat_gtxpf_lo; /* gtpf */
1240 u32 tx_stat_gtxpf_hi; /* gtpf */
1241 u32 tx_stat_gtpp_lo; /* NEW BMAC2 */
1242 u32 tx_stat_gtpp_hi; /* NEW BMAC2 */
1243 u32 tx_stat_gtfcs_lo;
1244 u32 tx_stat_gtfcs_hi;
1245 u32 tx_stat_gtuca_lo; /* NEW BMAC2 */
1246 u32 tx_stat_gtuca_hi; /* NEW BMAC2 */
1247 u32 tx_stat_gtmca_lo;
1248 u32 tx_stat_gtmca_hi;
1249 u32 tx_stat_gtbca_lo;
1250 u32 tx_stat_gtbca_hi;
1251 u32 tx_stat_gtovr_lo;
1252 u32 tx_stat_gtovr_hi;
1253 u32 tx_stat_gtfrg_lo;
1254 u32 tx_stat_gtfrg_hi;
1255 u32 tx_stat_gtpkt1_lo; /* gtpkt */
1256 u32 tx_stat_gtpkt1_hi; /* gtpkt */
1257 u32 tx_stat_gt64_lo;
1258 u32 tx_stat_gt64_hi;
1259 u32 tx_stat_gt127_lo;
1260 u32 tx_stat_gt127_hi;
1261 u32 tx_stat_gt255_lo;
1262 u32 tx_stat_gt255_hi;
1263 u32 tx_stat_gt511_lo;
1264 u32 tx_stat_gt511_hi;
1265 u32 tx_stat_gt1023_lo;
1266 u32 tx_stat_gt1023_hi;
1267 u32 tx_stat_gt1518_lo;
1268 u32 tx_stat_gt1518_hi;
1269 u32 tx_stat_gt2047_lo;
1270 u32 tx_stat_gt2047_hi;
1271 u32 tx_stat_gt4095_lo;
1272 u32 tx_stat_gt4095_hi;
1273 u32 tx_stat_gt9216_lo;
1274 u32 tx_stat_gt9216_hi;
1275 u32 tx_stat_gt16383_lo;
1276 u32 tx_stat_gt16383_hi;
1277 u32 tx_stat_gtmax_lo;
1278 u32 tx_stat_gtmax_hi;
1279 u32 tx_stat_gtufl_lo;
1280 u32 tx_stat_gtufl_hi;
1281 u32 tx_stat_gterr_lo;
1282 u32 tx_stat_gterr_hi;
1283 u32 tx_stat_gtbyt_lo;
1284 u32 tx_stat_gtbyt_hi;
1285
1286 u32 rx_stat_gr64_lo;
1287 u32 rx_stat_gr64_hi;
1288 u32 rx_stat_gr127_lo;
1289 u32 rx_stat_gr127_hi;
1290 u32 rx_stat_gr255_lo;
1291 u32 rx_stat_gr255_hi;
1292 u32 rx_stat_gr511_lo;
1293 u32 rx_stat_gr511_hi;
1294 u32 rx_stat_gr1023_lo;
1295 u32 rx_stat_gr1023_hi;
1296 u32 rx_stat_gr1518_lo;
1297 u32 rx_stat_gr1518_hi;
1298 u32 rx_stat_gr2047_lo;
1299 u32 rx_stat_gr2047_hi;
1300 u32 rx_stat_gr4095_lo;
1301 u32 rx_stat_gr4095_hi;
1302 u32 rx_stat_gr9216_lo;
1303 u32 rx_stat_gr9216_hi;
1304 u32 rx_stat_gr16383_lo;
1305 u32 rx_stat_gr16383_hi;
1306 u32 rx_stat_grmax_lo;
1307 u32 rx_stat_grmax_hi;
1308 u32 rx_stat_grpkt_lo;
1309 u32 rx_stat_grpkt_hi;
1310 u32 rx_stat_grfcs_lo;
1311 u32 rx_stat_grfcs_hi;
1312 u32 rx_stat_gruca_lo;
1313 u32 rx_stat_gruca_hi;
1314 u32 rx_stat_grmca_lo;
1315 u32 rx_stat_grmca_hi;
1316 u32 rx_stat_grbca_lo;
1317 u32 rx_stat_grbca_hi;
1318 u32 rx_stat_grxpf_lo; /* grpf */
1319 u32 rx_stat_grxpf_hi; /* grpf */
1320 u32 rx_stat_grpp_lo;
1321 u32 rx_stat_grpp_hi;
1322 u32 rx_stat_grxuo_lo; /* gruo */
1323 u32 rx_stat_grxuo_hi; /* gruo */
1324 u32 rx_stat_grjbr_lo;
1325 u32 rx_stat_grjbr_hi;
1326 u32 rx_stat_grovr_lo;
1327 u32 rx_stat_grovr_hi;
1328 u32 rx_stat_grxcf_lo; /* grcf */
1329 u32 rx_stat_grxcf_hi; /* grcf */
1330 u32 rx_stat_grflr_lo;
1331 u32 rx_stat_grflr_hi;
1332 u32 rx_stat_grpok_lo;
1333 u32 rx_stat_grpok_hi;
1334 u32 rx_stat_grmeg_lo;
1335 u32 rx_stat_grmeg_hi;
1336 u32 rx_stat_grmeb_lo;
1337 u32 rx_stat_grmeb_hi;
1338 u32 rx_stat_grbyt_lo;
1339 u32 rx_stat_grbyt_hi;
1340 u32 rx_stat_grund_lo;
1341 u32 rx_stat_grund_hi;
1342 u32 rx_stat_grfrg_lo;
1343 u32 rx_stat_grfrg_hi;
1344 u32 rx_stat_grerb_lo; /* grerrbyt */
1345 u32 rx_stat_grerb_hi; /* grerrbyt */
1346 u32 rx_stat_grfre_lo; /* grfrerr */
1347 u32 rx_stat_grfre_hi; /* grfrerr */
1348 u32 rx_stat_gripj_lo;
1349 u32 rx_stat_gripj_hi;
1350};
1203 1351
1204union mac_stats { 1352union mac_stats {
1205 struct emac_stats emac_stats; 1353 struct emac_stats emac_stats;
1206 struct bmac_stats bmac_stats; 1354 struct bmac1_stats bmac1_stats;
1355 struct bmac2_stats bmac2_stats;
1207}; 1356};
1208 1357
1209 1358
@@ -1377,17 +1526,17 @@ struct host_func_stats {
1377}; 1526};
1378 1527
1379 1528
1380#define BCM_5710_FW_MAJOR_VERSION 5 1529#define BCM_5710_FW_MAJOR_VERSION 6
1381#define BCM_5710_FW_MINOR_VERSION 2 1530#define BCM_5710_FW_MINOR_VERSION 0
1382#define BCM_5710_FW_REVISION_VERSION 13 1531#define BCM_5710_FW_REVISION_VERSION 34
1383#define BCM_5710_FW_ENGINEERING_VERSION 0 1532#define BCM_5710_FW_ENGINEERING_VERSION 0
1384#define BCM_5710_FW_COMPILE_FLAGS 1 1533#define BCM_5710_FW_COMPILE_FLAGS 1
1385 1534
1386 1535
1387/* 1536/*
1388 * attention bits 1537 * attention bits
1389 */ 1538 */
1390struct atten_def_status_block { 1539struct atten_sp_status_block {
1391 __le32 attn_bits; 1540 __le32 attn_bits;
1392 __le32 attn_bits_ack; 1541 __le32 attn_bits_ack;
1393 u8 status_block_id; 1542 u8 status_block_id;
@@ -1445,7 +1594,60 @@ struct doorbell_set_prod {
1445 1594
1446 1595
1447/* 1596/*
1448 * IGU driver acknowledgement register 1597 * 3 lines. status block
1598 */
1599struct hc_status_block_e1x {
1600 __le16 index_values[HC_SB_MAX_INDICES_E1X];
1601 __le16 running_index[HC_SB_MAX_SM];
1602 u32 rsrv;
1603};
1604
1605/*
1606 * host status block
1607 */
1608struct host_hc_status_block_e1x {
1609 struct hc_status_block_e1x sb;
1610};
1611
1612
1613/*
1614 * 3 lines. status block
1615 */
1616struct hc_status_block_e2 {
1617 __le16 index_values[HC_SB_MAX_INDICES_E2];
1618 __le16 running_index[HC_SB_MAX_SM];
1619 u32 reserved;
1620};
1621
1622/*
1623 * host status block
1624 */
1625struct host_hc_status_block_e2 {
1626 struct hc_status_block_e2 sb;
1627};
1628
1629
1630/*
1631 * 5 lines. slow-path status block
1632 */
1633struct hc_sp_status_block {
1634 __le16 index_values[HC_SP_SB_MAX_INDICES];
1635 __le16 running_index;
1636 __le16 rsrv;
1637 u32 rsrv1;
1638};
1639
1640/*
1641 * host status block
1642 */
1643struct host_sp_status_block {
1644 struct atten_sp_status_block atten_status_block;
1645 struct hc_sp_status_block sp_sb;
1646};
1647
1648
1649/*
1650 * IGU driver acknowledgment register
1449 */ 1651 */
1450struct igu_ack_register { 1652struct igu_ack_register {
1451#if defined(__BIG_ENDIAN) 1653#if defined(__BIG_ENDIAN)
@@ -1535,6 +1737,24 @@ union igu_consprod_reg {
1535 1737
1536 1738
1537/* 1739/*
1740 * Control register for the IGU command register
1741 */
1742struct igu_ctrl_reg {
1743 u32 ctrl_data;
1744#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
1745#define IGU_CTRL_REG_ADDRESS_SHIFT 0
1746#define IGU_CTRL_REG_FID (0x7F<<12)
1747#define IGU_CTRL_REG_FID_SHIFT 12
1748#define IGU_CTRL_REG_RESERVED (0x1<<19)
1749#define IGU_CTRL_REG_RESERVED_SHIFT 19
1750#define IGU_CTRL_REG_TYPE (0x1<<20)
1751#define IGU_CTRL_REG_TYPE_SHIFT 20
1752#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
1753#define IGU_CTRL_REG_UNUSED_SHIFT 21
1754};
1755
1756
1757/*
1538 * Parser parsing flags field 1758 * Parser parsing flags field
1539 */ 1759 */
1540struct parsing_flags { 1760struct parsing_flags {
@@ -1603,8 +1823,14 @@ struct dmae_command {
1603#define DMAE_COMMAND_DST_RESET_SHIFT 14 1823#define DMAE_COMMAND_DST_RESET_SHIFT 14
1604#define DMAE_COMMAND_E1HVN (0x3<<15) 1824#define DMAE_COMMAND_E1HVN (0x3<<15)
1605#define DMAE_COMMAND_E1HVN_SHIFT 15 1825#define DMAE_COMMAND_E1HVN_SHIFT 15
1606#define DMAE_COMMAND_RESERVED0 (0x7FFF<<17) 1826#define DMAE_COMMAND_DST_VN (0x3<<17)
1607#define DMAE_COMMAND_RESERVED0_SHIFT 17 1827#define DMAE_COMMAND_DST_VN_SHIFT 17
1828#define DMAE_COMMAND_C_FUNC (0x1<<19)
1829#define DMAE_COMMAND_C_FUNC_SHIFT 19
1830#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
1831#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
1832#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
1833#define DMAE_COMMAND_RESERVED0_SHIFT 22
1608 u32 src_addr_lo; 1834 u32 src_addr_lo;
1609 u32 src_addr_hi; 1835 u32 src_addr_hi;
1610 u32 dst_addr_lo; 1836 u32 dst_addr_lo;
@@ -1629,11 +1855,11 @@ struct dmae_command {
1629 u16 crc16_c; 1855 u16 crc16_c;
1630#endif 1856#endif
1631#if defined(__BIG_ENDIAN) 1857#if defined(__BIG_ENDIAN)
1632 u16 reserved2; 1858 u16 reserved3;
1633 u16 crc_t10; 1859 u16 crc_t10;
1634#elif defined(__LITTLE_ENDIAN) 1860#elif defined(__LITTLE_ENDIAN)
1635 u16 crc_t10; 1861 u16 crc_t10;
1636 u16 reserved2; 1862 u16 reserved3;
1637#endif 1863#endif
1638#if defined(__BIG_ENDIAN) 1864#if defined(__BIG_ENDIAN)
1639 u16 xsum8; 1865 u16 xsum8;
@@ -1654,96 +1880,20 @@ struct double_regpair {
1654 1880
1655 1881
1656/* 1882/*
1657 * The eth storm context of Ustorm (configuration part) 1883 * SDM operation gen command (generate aggregative interrupt)
1658 */ 1884 */
1659struct ustorm_eth_st_context_config { 1885struct sdm_op_gen {
1660#if defined(__BIG_ENDIAN) 1886 __le32 command;
1661 u8 flags; 1887#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
1662#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0) 1888#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
1663#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0 1889#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
1664#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1) 1890#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
1665#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1 1891#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
1666#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2) 1892#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
1667#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2 1893#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
1668#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3) 1894#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
1669#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3 1895#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
1670#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4) 1896#define SDM_OP_GEN_RESERVED_SHIFT 17
1671#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1672 u8 status_block_id;
1673 u8 clientId;
1674 u8 sb_index_numbers;
1675#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1676#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1677#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1678#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1679#elif defined(__LITTLE_ENDIAN)
1680 u8 sb_index_numbers;
1681#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER (0xF<<0)
1682#define USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT 0
1683#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER (0xF<<4)
1684#define USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT 4
1685 u8 clientId;
1686 u8 status_block_id;
1687 u8 flags;
1688#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT (0x1<<0)
1689#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT_SHIFT 0
1690#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC (0x1<<1)
1691#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_DYNAMIC_HC_SHIFT 1
1692#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA (0x1<<2)
1693#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA_SHIFT 2
1694#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS (0x1<<3)
1695#define USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS_SHIFT 3
1696#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0 (0xF<<4)
1697#define __USTORM_ETH_ST_CONTEXT_CONFIG_RESERVED0_SHIFT 4
1698#endif
1699#if defined(__BIG_ENDIAN)
1700 u16 bd_buff_size;
1701 u8 statistics_counter_id;
1702 u8 mc_alignment_log_size;
1703#elif defined(__LITTLE_ENDIAN)
1704 u8 mc_alignment_log_size;
1705 u8 statistics_counter_id;
1706 u16 bd_buff_size;
1707#endif
1708#if defined(__BIG_ENDIAN)
1709 u8 __local_sge_prod;
1710 u8 __local_bd_prod;
1711 u16 sge_buff_size;
1712#elif defined(__LITTLE_ENDIAN)
1713 u16 sge_buff_size;
1714 u8 __local_bd_prod;
1715 u8 __local_sge_prod;
1716#endif
1717#if defined(__BIG_ENDIAN)
1718 u16 __sdm_bd_expected_counter;
1719 u8 cstorm_agg_int;
1720 u8 __expected_bds_on_ram;
1721#elif defined(__LITTLE_ENDIAN)
1722 u8 __expected_bds_on_ram;
1723 u8 cstorm_agg_int;
1724 u16 __sdm_bd_expected_counter;
1725#endif
1726#if defined(__BIG_ENDIAN)
1727 u16 __ring_data_ram_addr;
1728 u16 __hc_cstorm_ram_addr;
1729#elif defined(__LITTLE_ENDIAN)
1730 u16 __hc_cstorm_ram_addr;
1731 u16 __ring_data_ram_addr;
1732#endif
1733#if defined(__BIG_ENDIAN)
1734 u8 reserved1;
1735 u8 max_sges_for_packet;
1736 u16 __bd_ring_ram_addr;
1737#elif defined(__LITTLE_ENDIAN)
1738 u16 __bd_ring_ram_addr;
1739 u8 max_sges_for_packet;
1740 u8 reserved1;
1741#endif
1742 u32 bd_page_base_lo;
1743 u32 bd_page_base_hi;
1744 u32 sge_page_base_lo;
1745 u32 sge_page_base_hi;
1746 struct regpair reserved2;
1747}; 1897};
1748 1898
1749/* 1899/*
@@ -1762,20 +1912,13 @@ struct eth_rx_sge {
1762 __le32 addr_hi; 1912 __le32 addr_hi;
1763}; 1913};
1764 1914
1765/* 1915
1766 * Local BDs and SGEs rings (in ETH)
1767 */
1768struct eth_local_rx_rings {
1769 struct eth_rx_bd __local_bd_ring[8];
1770 struct eth_rx_sge __local_sge_ring[10];
1771};
1772 1916
1773/* 1917/*
1774 * The eth storm context of Ustorm 1918 * The eth storm context of Ustorm
1775 */ 1919 */
1776struct ustorm_eth_st_context { 1920struct ustorm_eth_st_context {
1777 struct ustorm_eth_st_context_config common; 1921 u32 reserved0[48];
1778 struct eth_local_rx_rings __rings;
1779}; 1922};
1780 1923
1781/* 1924/*
@@ -1786,337 +1929,53 @@ struct tstorm_eth_st_context {
1786}; 1929};
1787 1930
1788/* 1931/*
1789 * The eth aggregative context section of Xstorm
1790 */
1791struct xstorm_eth_extra_ag_context_section {
1792#if defined(__BIG_ENDIAN)
1793 u8 __tcp_agg_vars1;
1794 u8 __reserved50;
1795 u16 __mss;
1796#elif defined(__LITTLE_ENDIAN)
1797 u16 __mss;
1798 u8 __reserved50;
1799 u8 __tcp_agg_vars1;
1800#endif
1801 u32 __snd_nxt;
1802 u32 __tx_wnd;
1803 u32 __snd_una;
1804 u32 __reserved53;
1805#if defined(__BIG_ENDIAN)
1806 u8 __agg_val8_th;
1807 u8 __agg_val8;
1808 u16 __tcp_agg_vars2;
1809#elif defined(__LITTLE_ENDIAN)
1810 u16 __tcp_agg_vars2;
1811 u8 __agg_val8;
1812 u8 __agg_val8_th;
1813#endif
1814 u32 __reserved58;
1815 u32 __reserved59;
1816 u32 __reserved60;
1817 u32 __reserved61;
1818#if defined(__BIG_ENDIAN)
1819 u16 __agg_val7_th;
1820 u16 __agg_val7;
1821#elif defined(__LITTLE_ENDIAN)
1822 u16 __agg_val7;
1823 u16 __agg_val7_th;
1824#endif
1825#if defined(__BIG_ENDIAN)
1826 u8 __tcp_agg_vars5;
1827 u8 __tcp_agg_vars4;
1828 u8 __tcp_agg_vars3;
1829 u8 __reserved62;
1830#elif defined(__LITTLE_ENDIAN)
1831 u8 __reserved62;
1832 u8 __tcp_agg_vars3;
1833 u8 __tcp_agg_vars4;
1834 u8 __tcp_agg_vars5;
1835#endif
1836 u32 __tcp_agg_vars6;
1837#if defined(__BIG_ENDIAN)
1838 u16 __agg_misc6;
1839 u16 __tcp_agg_vars7;
1840#elif defined(__LITTLE_ENDIAN)
1841 u16 __tcp_agg_vars7;
1842 u16 __agg_misc6;
1843#endif
1844 u32 __agg_val10;
1845 u32 __agg_val10_th;
1846#if defined(__BIG_ENDIAN)
1847 u16 __reserved3;
1848 u8 __reserved2;
1849 u8 __da_only_cnt;
1850#elif defined(__LITTLE_ENDIAN)
1851 u8 __da_only_cnt;
1852 u8 __reserved2;
1853 u16 __reserved3;
1854#endif
1855};
1856
1857/*
1858 * The eth aggregative context of Xstorm 1932 * The eth aggregative context of Xstorm
1859 */ 1933 */
1860struct xstorm_eth_ag_context { 1934struct xstorm_eth_ag_context {
1861#if defined(__BIG_ENDIAN) 1935 u32 reserved0;
1862 u16 agg_val1;
1863 u8 __agg_vars1;
1864 u8 __state;
1865#elif defined(__LITTLE_ENDIAN)
1866 u8 __state;
1867 u8 __agg_vars1;
1868 u16 agg_val1;
1869#endif
1870#if defined(__BIG_ENDIAN) 1936#if defined(__BIG_ENDIAN)
1871 u8 cdu_reserved; 1937 u8 cdu_reserved;
1872 u8 __agg_vars4; 1938 u8 reserved2;
1873 u8 __agg_vars3; 1939 u16 reserved1;
1874 u8 __agg_vars2;
1875#elif defined(__LITTLE_ENDIAN) 1940#elif defined(__LITTLE_ENDIAN)
1876 u8 __agg_vars2; 1941 u16 reserved1;
1877 u8 __agg_vars3; 1942 u8 reserved2;
1878 u8 __agg_vars4;
1879 u8 cdu_reserved; 1943 u8 cdu_reserved;
1880#endif 1944#endif
1881 u32 __bd_prod; 1945 u32 reserved3[30];
1882#if defined(__BIG_ENDIAN)
1883 u16 __agg_vars5;
1884 u16 __agg_val4_th;
1885#elif defined(__LITTLE_ENDIAN)
1886 u16 __agg_val4_th;
1887 u16 __agg_vars5;
1888#endif
1889 struct xstorm_eth_extra_ag_context_section __extra_section;
1890#if defined(__BIG_ENDIAN)
1891 u16 __agg_vars7;
1892 u8 __agg_val3_th;
1893 u8 __agg_vars6;
1894#elif defined(__LITTLE_ENDIAN)
1895 u8 __agg_vars6;
1896 u8 __agg_val3_th;
1897 u16 __agg_vars7;
1898#endif
1899#if defined(__BIG_ENDIAN)
1900 u16 __agg_val11_th;
1901 u16 __agg_val11;
1902#elif defined(__LITTLE_ENDIAN)
1903 u16 __agg_val11;
1904 u16 __agg_val11_th;
1905#endif
1906#if defined(__BIG_ENDIAN)
1907 u8 __reserved1;
1908 u8 __agg_val6_th;
1909 u16 __agg_val9;
1910#elif defined(__LITTLE_ENDIAN)
1911 u16 __agg_val9;
1912 u8 __agg_val6_th;
1913 u8 __reserved1;
1914#endif
1915#if defined(__BIG_ENDIAN)
1916 u16 __agg_val2_th;
1917 u16 __agg_val2;
1918#elif defined(__LITTLE_ENDIAN)
1919 u16 __agg_val2;
1920 u16 __agg_val2_th;
1921#endif
1922 u32 __agg_vars8;
1923#if defined(__BIG_ENDIAN)
1924 u16 __agg_misc0;
1925 u16 __agg_val4;
1926#elif defined(__LITTLE_ENDIAN)
1927 u16 __agg_val4;
1928 u16 __agg_misc0;
1929#endif
1930#if defined(__BIG_ENDIAN)
1931 u8 __agg_val3;
1932 u8 __agg_val6;
1933 u8 __agg_val5_th;
1934 u8 __agg_val5;
1935#elif defined(__LITTLE_ENDIAN)
1936 u8 __agg_val5;
1937 u8 __agg_val5_th;
1938 u8 __agg_val6;
1939 u8 __agg_val3;
1940#endif
1941#if defined(__BIG_ENDIAN)
1942 u16 __agg_misc1;
1943 u16 __bd_ind_max_val;
1944#elif defined(__LITTLE_ENDIAN)
1945 u16 __bd_ind_max_val;
1946 u16 __agg_misc1;
1947#endif
1948 u32 __reserved57;
1949 u32 __agg_misc4;
1950 u32 __agg_misc5;
1951};
1952
1953/*
1954 * The eth extra aggregative context section of Tstorm
1955 */
1956struct tstorm_eth_extra_ag_context_section {
1957 u32 __agg_val1;
1958#if defined(__BIG_ENDIAN)
1959 u8 __tcp_agg_vars2;
1960 u8 __agg_val3;
1961 u16 __agg_val2;
1962#elif defined(__LITTLE_ENDIAN)
1963 u16 __agg_val2;
1964 u8 __agg_val3;
1965 u8 __tcp_agg_vars2;
1966#endif
1967#if defined(__BIG_ENDIAN)
1968 u16 __agg_val5;
1969 u8 __agg_val6;
1970 u8 __tcp_agg_vars3;
1971#elif defined(__LITTLE_ENDIAN)
1972 u8 __tcp_agg_vars3;
1973 u8 __agg_val6;
1974 u16 __agg_val5;
1975#endif
1976 u32 __reserved63;
1977 u32 __reserved64;
1978 u32 __reserved65;
1979 u32 __reserved66;
1980 u32 __reserved67;
1981 u32 __tcp_agg_vars1;
1982 u32 __reserved61;
1983 u32 __reserved62;
1984 u32 __reserved2;
1985}; 1946};
1986 1947
1987/* 1948/*
1988 * The eth aggregative context of Tstorm 1949 * The eth aggregative context of Tstorm
1989 */ 1950 */
1990struct tstorm_eth_ag_context { 1951struct tstorm_eth_ag_context {
1991#if defined(__BIG_ENDIAN) 1952 u32 __reserved0[14];
1992 u16 __reserved54;
1993 u8 __agg_vars1;
1994 u8 __state;
1995#elif defined(__LITTLE_ENDIAN)
1996 u8 __state;
1997 u8 __agg_vars1;
1998 u16 __reserved54;
1999#endif
2000#if defined(__BIG_ENDIAN)
2001 u16 __agg_val4;
2002 u16 __agg_vars2;
2003#elif defined(__LITTLE_ENDIAN)
2004 u16 __agg_vars2;
2005 u16 __agg_val4;
2006#endif
2007 struct tstorm_eth_extra_ag_context_section __extra_section;
2008}; 1953};
2009 1954
1955
2010/* 1956/*
2011 * The eth aggregative context of Cstorm 1957 * The eth aggregative context of Cstorm
2012 */ 1958 */
2013struct cstorm_eth_ag_context { 1959struct cstorm_eth_ag_context {
2014 u32 __agg_vars1; 1960 u32 __reserved0[10];
2015#if defined(__BIG_ENDIAN)
2016 u8 __aux1_th;
2017 u8 __aux1_val;
2018 u16 __agg_vars2;
2019#elif defined(__LITTLE_ENDIAN)
2020 u16 __agg_vars2;
2021 u8 __aux1_val;
2022 u8 __aux1_th;
2023#endif
2024 u32 __num_of_treated_packet;
2025 u32 __last_packet_treated;
2026#if defined(__BIG_ENDIAN)
2027 u16 __reserved58;
2028 u16 __reserved57;
2029#elif defined(__LITTLE_ENDIAN)
2030 u16 __reserved57;
2031 u16 __reserved58;
2032#endif
2033#if defined(__BIG_ENDIAN)
2034 u8 __reserved62;
2035 u8 __reserved61;
2036 u8 __reserved60;
2037 u8 __reserved59;
2038#elif defined(__LITTLE_ENDIAN)
2039 u8 __reserved59;
2040 u8 __reserved60;
2041 u8 __reserved61;
2042 u8 __reserved62;
2043#endif
2044#if defined(__BIG_ENDIAN)
2045 u16 __reserved64;
2046 u16 __reserved63;
2047#elif defined(__LITTLE_ENDIAN)
2048 u16 __reserved63;
2049 u16 __reserved64;
2050#endif
2051 u32 __reserved65;
2052#if defined(__BIG_ENDIAN)
2053 u16 __agg_vars3;
2054 u16 __rq_inv_cnt;
2055#elif defined(__LITTLE_ENDIAN)
2056 u16 __rq_inv_cnt;
2057 u16 __agg_vars3;
2058#endif
2059#if defined(__BIG_ENDIAN)
2060 u16 __packet_index_th;
2061 u16 __packet_index;
2062#elif defined(__LITTLE_ENDIAN)
2063 u16 __packet_index;
2064 u16 __packet_index_th;
2065#endif
2066}; 1961};
2067 1962
1963
2068/* 1964/*
2069 * The eth aggregative context of Ustorm 1965 * The eth aggregative context of Ustorm
2070 */ 1966 */
2071struct ustorm_eth_ag_context { 1967struct ustorm_eth_ag_context {
2072#if defined(__BIG_ENDIAN) 1968 u32 __reserved0;
2073 u8 __aux_counter_flags;
2074 u8 __agg_vars2;
2075 u8 __agg_vars1;
2076 u8 __state;
2077#elif defined(__LITTLE_ENDIAN)
2078 u8 __state;
2079 u8 __agg_vars1;
2080 u8 __agg_vars2;
2081 u8 __aux_counter_flags;
2082#endif
2083#if defined(__BIG_ENDIAN) 1969#if defined(__BIG_ENDIAN)
2084 u8 cdu_usage; 1970 u8 cdu_usage;
2085 u8 __agg_misc2; 1971 u8 __reserved2;
2086 u16 __agg_misc1; 1972 u16 __reserved1;
2087#elif defined(__LITTLE_ENDIAN) 1973#elif defined(__LITTLE_ENDIAN)
2088 u16 __agg_misc1; 1974 u16 __reserved1;
2089 u8 __agg_misc2; 1975 u8 __reserved2;
2090 u8 cdu_usage; 1976 u8 cdu_usage;
2091#endif 1977#endif
2092 u32 __agg_misc4; 1978 u32 __reserved3[6];
2093#if defined(__BIG_ENDIAN)
2094 u8 __agg_val3_th;
2095 u8 __agg_val3;
2096 u16 __agg_misc3;
2097#elif defined(__LITTLE_ENDIAN)
2098 u16 __agg_misc3;
2099 u8 __agg_val3;
2100 u8 __agg_val3_th;
2101#endif
2102 u32 __agg_val1;
2103 u32 __agg_misc4_th;
2104#if defined(__BIG_ENDIAN)
2105 u16 __agg_val2_th;
2106 u16 __agg_val2;
2107#elif defined(__LITTLE_ENDIAN)
2108 u16 __agg_val2;
2109 u16 __agg_val2_th;
2110#endif
2111#if defined(__BIG_ENDIAN)
2112 u16 __reserved2;
2113 u8 __decision_rules;
2114 u8 __decision_rule_enable_bits;
2115#elif defined(__LITTLE_ENDIAN)
2116 u8 __decision_rule_enable_bits;
2117 u8 __decision_rules;
2118 u16 __reserved2;
2119#endif
2120}; 1979};
2121 1980
2122/* 1981/*
@@ -2140,18 +1999,16 @@ struct timers_block_context {
2140 */ 1999 */
2141struct eth_tx_bd_flags { 2000struct eth_tx_bd_flags {
2142 u8 as_bitfield; 2001 u8 as_bitfield;
2143#define ETH_TX_BD_FLAGS_VLAN_TAG (0x1<<0) 2002#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
2144#define ETH_TX_BD_FLAGS_VLAN_TAG_SHIFT 0 2003#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
2145#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<1) 2004#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
2146#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 1 2005#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
2147#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<2) 2006#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
2148#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 2 2007#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
2149#define ETH_TX_BD_FLAGS_END_BD (0x1<<3)
2150#define ETH_TX_BD_FLAGS_END_BD_SHIFT 3
2151#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) 2008#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
2152#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 2009#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
2153#define ETH_TX_BD_FLAGS_HDR_POOL (0x1<<5) 2010#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
2154#define ETH_TX_BD_FLAGS_HDR_POOL_SHIFT 5 2011#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
2155#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) 2012#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
2156#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 2013#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
2157#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) 2014#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
@@ -2166,7 +2023,7 @@ struct eth_tx_start_bd {
2166 __le32 addr_hi; 2023 __le32 addr_hi;
2167 __le16 nbd; 2024 __le16 nbd;
2168 __le16 nbytes; 2025 __le16 nbytes;
2169 __le16 vlan; 2026 __le16 vlan_or_ethertype;
2170 struct eth_tx_bd_flags bd_flags; 2027 struct eth_tx_bd_flags bd_flags;
2171 u8 general_data; 2028 u8 general_data;
2172#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0) 2029#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0)
@@ -2179,48 +2036,48 @@ struct eth_tx_start_bd {
2179 * Tx regular BD structure 2036 * Tx regular BD structure
2180 */ 2037 */
2181struct eth_tx_bd { 2038struct eth_tx_bd {
2182 u32 addr_lo; 2039 __le32 addr_lo;
2183 u32 addr_hi; 2040 __le32 addr_hi;
2184 u16 total_pkt_bytes; 2041 __le16 total_pkt_bytes;
2185 u16 nbytes; 2042 __le16 nbytes;
2186 u8 reserved[4]; 2043 u8 reserved[4];
2187}; 2044};
2188 2045
2189/* 2046/*
2190 * Tx parsing BD structure for ETH,Relevant in START 2047 * Tx parsing BD structure for ETH E1/E1h
2191 */ 2048 */
2192struct eth_tx_parse_bd { 2049struct eth_tx_parse_bd_e1x {
2193 u8 global_data; 2050 u8 global_data;
2194#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET (0xF<<0) 2051#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
2195#define ETH_TX_PARSE_BD_IP_HDR_START_OFFSET_SHIFT 0 2052#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
2196#define ETH_TX_PARSE_BD_UDP_CS_FLG (0x1<<4) 2053#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
2197#define ETH_TX_PARSE_BD_UDP_CS_FLG_SHIFT 4 2054#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
2198#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN (0x1<<5) 2055#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
2199#define ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 2056#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
2200#define ETH_TX_PARSE_BD_LLC_SNAP_EN (0x1<<6) 2057#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
2201#define ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT 6 2058#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
2202#define ETH_TX_PARSE_BD_NS_FLG (0x1<<7) 2059#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
2203#define ETH_TX_PARSE_BD_NS_FLG_SHIFT 7 2060#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
2204 u8 tcp_flags; 2061 u8 tcp_flags;
2205#define ETH_TX_PARSE_BD_FIN_FLG (0x1<<0) 2062#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
2206#define ETH_TX_PARSE_BD_FIN_FLG_SHIFT 0 2063#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
2207#define ETH_TX_PARSE_BD_SYN_FLG (0x1<<1) 2064#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
2208#define ETH_TX_PARSE_BD_SYN_FLG_SHIFT 1 2065#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
2209#define ETH_TX_PARSE_BD_RST_FLG (0x1<<2) 2066#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
2210#define ETH_TX_PARSE_BD_RST_FLG_SHIFT 2 2067#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
2211#define ETH_TX_PARSE_BD_PSH_FLG (0x1<<3) 2068#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
2212#define ETH_TX_PARSE_BD_PSH_FLG_SHIFT 3 2069#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
2213#define ETH_TX_PARSE_BD_ACK_FLG (0x1<<4) 2070#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
2214#define ETH_TX_PARSE_BD_ACK_FLG_SHIFT 4 2071#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
2215#define ETH_TX_PARSE_BD_URG_FLG (0x1<<5) 2072#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
2216#define ETH_TX_PARSE_BD_URG_FLG_SHIFT 5 2073#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
2217#define ETH_TX_PARSE_BD_ECE_FLG (0x1<<6) 2074#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
2218#define ETH_TX_PARSE_BD_ECE_FLG_SHIFT 6 2075#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
2219#define ETH_TX_PARSE_BD_CWR_FLG (0x1<<7) 2076#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
2220#define ETH_TX_PARSE_BD_CWR_FLG_SHIFT 7 2077#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
2221 u8 ip_hlen; 2078 u8 ip_hlen_w;
2222 s8 reserved; 2079 s8 reserved;
2223 __le16 total_hlen; 2080 __le16 total_hlen_w;
2224 __le16 tcp_pseudo_csum; 2081 __le16 tcp_pseudo_csum;
2225 __le16 lso_mss; 2082 __le16 lso_mss;
2226 __le16 ip_id; 2083 __le16 ip_id;
@@ -2228,6 +2085,27 @@ struct eth_tx_parse_bd {
2228}; 2085};
2229 2086
2230/* 2087/*
2088 * Tx parsing BD structure for ETH E2
2089 */
2090struct eth_tx_parse_bd_e2 {
2091 __le16 dst_mac_addr_lo;
2092 __le16 dst_mac_addr_mid;
2093 __le16 dst_mac_addr_hi;
2094 __le16 src_mac_addr_lo;
2095 __le16 src_mac_addr_mid;
2096 __le16 src_mac_addr_hi;
2097 __le32 parsing_data;
2098#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
2099#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
2100#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
2101#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
2102#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
2103#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
2104#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
2105#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
2106};
2107
2108/*
2231 * The last BD in the BD memory will hold a pointer to the next BD memory 2109 * The last BD in the BD memory will hold a pointer to the next BD memory
2232 */ 2110 */
2233struct eth_tx_next_bd { 2111struct eth_tx_next_bd {
@@ -2242,79 +2120,24 @@ struct eth_tx_next_bd {
2242union eth_tx_bd_types { 2120union eth_tx_bd_types {
2243 struct eth_tx_start_bd start_bd; 2121 struct eth_tx_start_bd start_bd;
2244 struct eth_tx_bd reg_bd; 2122 struct eth_tx_bd reg_bd;
2245 struct eth_tx_parse_bd parse_bd; 2123 struct eth_tx_parse_bd_e1x parse_bd_e1x;
2124 struct eth_tx_parse_bd_e2 parse_bd_e2;
2246 struct eth_tx_next_bd next_bd; 2125 struct eth_tx_next_bd next_bd;
2247}; 2126};
2248 2127
2128
2249/* 2129/*
2250 * The eth storm context of Xstorm 2130 * The eth storm context of Xstorm
2251 */ 2131 */
2252struct xstorm_eth_st_context { 2132struct xstorm_eth_st_context {
2253 u32 tx_bd_page_base_lo; 2133 u32 reserved0[60];
2254 u32 tx_bd_page_base_hi;
2255#if defined(__BIG_ENDIAN)
2256 u16 tx_bd_cons;
2257 u8 statistics_data;
2258#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2259#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2260#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2261#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2262 u8 __local_tx_bd_prod;
2263#elif defined(__LITTLE_ENDIAN)
2264 u8 __local_tx_bd_prod;
2265 u8 statistics_data;
2266#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID (0x7F<<0)
2267#define XSTORM_ETH_ST_CONTEXT_STATISTICS_COUNTER_ID_SHIFT 0
2268#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE (0x1<<7)
2269#define XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE_SHIFT 7
2270 u16 tx_bd_cons;
2271#endif
2272 u32 __reserved1;
2273 u32 __reserved2;
2274#if defined(__BIG_ENDIAN)
2275 u8 __ram_cache_index;
2276 u8 __double_buffer_client;
2277 u16 __pkt_cons;
2278#elif defined(__LITTLE_ENDIAN)
2279 u16 __pkt_cons;
2280 u8 __double_buffer_client;
2281 u8 __ram_cache_index;
2282#endif
2283#if defined(__BIG_ENDIAN)
2284 u16 __statistics_address;
2285 u16 __gso_next;
2286#elif defined(__LITTLE_ENDIAN)
2287 u16 __gso_next;
2288 u16 __statistics_address;
2289#endif
2290#if defined(__BIG_ENDIAN)
2291 u8 __local_tx_bd_cons;
2292 u8 safc_group_num;
2293 u8 safc_group_en;
2294 u8 __is_eth_conn;
2295#elif defined(__LITTLE_ENDIAN)
2296 u8 __is_eth_conn;
2297 u8 safc_group_en;
2298 u8 safc_group_num;
2299 u8 __local_tx_bd_cons;
2300#endif
2301 union eth_tx_bd_types __bds[13];
2302}; 2134};
2303 2135
2304/* 2136/*
2305 * The eth storm context of Cstorm 2137 * The eth storm context of Cstorm
2306 */ 2138 */
2307struct cstorm_eth_st_context { 2139struct cstorm_eth_st_context {
2308#if defined(__BIG_ENDIAN) 2140 u32 __reserved0[4];
2309 u16 __reserved0;
2310 u8 sb_index_number;
2311 u8 status_block_id;
2312#elif defined(__LITTLE_ENDIAN)
2313 u8 status_block_id;
2314 u8 sb_index_number;
2315 u16 __reserved0;
2316#endif
2317 u32 __reserved1[3];
2318}; 2141};
2319 2142
2320/* 2143/*
@@ -2362,103 +2185,114 @@ struct eth_tx_doorbell {
2362 2185
2363 2186
2364/* 2187/*
2365 * cstorm default status block, generated by ustorm 2188 * client init fc data
2366 */ 2189 */
2367struct cstorm_def_status_block_u { 2190struct client_init_fc_data {
2368 __le16 index_values[HC_USTORM_DEF_SB_NUM_INDICES]; 2191 __le16 cqe_pause_thr_low;
2369 __le16 status_block_index; 2192 __le16 cqe_pause_thr_high;
2370 u8 func; 2193 __le16 bd_pause_thr_low;
2371 u8 status_block_id; 2194 __le16 bd_pause_thr_high;
2372 __le32 __flags; 2195 __le16 sge_pause_thr_low;
2373}; 2196 __le16 sge_pause_thr_high;
2374 2197 __le16 rx_cos_mask;
2375/* 2198 u8 safc_group_num;
2376 * cstorm default status block, generated by cstorm 2199 u8 safc_group_en_flg;
2377 */ 2200 u8 traffic_type;
2378struct cstorm_def_status_block_c { 2201 u8 reserved0;
2379 __le16 index_values[HC_CSTORM_DEF_SB_NUM_INDICES]; 2202 __le16 reserved1;
2380 __le16 status_block_index; 2203 __le32 reserved2;
2381 u8 func;
2382 u8 status_block_id;
2383 __le32 __flags;
2384};
2385
2386/*
2387 * xstorm status block
2388 */
2389struct xstorm_def_status_block {
2390 __le16 index_values[HC_XSTORM_DEF_SB_NUM_INDICES];
2391 __le16 status_block_index;
2392 u8 func;
2393 u8 status_block_id;
2394 __le32 __flags;
2395}; 2204};
2396 2205
2397/*
2398 * tstorm status block
2399 */
2400struct tstorm_def_status_block {
2401 __le16 index_values[HC_TSTORM_DEF_SB_NUM_INDICES];
2402 __le16 status_block_index;
2403 u8 func;
2404 u8 status_block_id;
2405 __le32 __flags;
2406};
2407 2206
2408/* 2207/*
2409 * host status block 2208 * client init ramrod data
2410 */ 2209 */
2411struct host_def_status_block { 2210struct client_init_general_data {
2412 struct atten_def_status_block atten_status_block; 2211 u8 client_id;
2413 struct cstorm_def_status_block_u u_def_status_block; 2212 u8 statistics_counter_id;
2414 struct cstorm_def_status_block_c c_def_status_block; 2213 u8 statistics_en_flg;
2415 struct xstorm_def_status_block x_def_status_block; 2214 u8 is_fcoe_flg;
2416 struct tstorm_def_status_block t_def_status_block; 2215 u8 activate_flg;
2216 u8 sp_client_id;
2217 __le16 reserved0;
2218 __le32 reserved1[2];
2417}; 2219};
2418 2220
2419 2221
2420/* 2222/*
2421 * cstorm status block, generated by ustorm 2223 * client init rx data
2422 */ 2224 */
2423struct cstorm_status_block_u { 2225struct client_init_rx_data {
2424 __le16 index_values[HC_USTORM_SB_NUM_INDICES]; 2226 u8 tpa_en_flg;
2425 __le16 status_block_index; 2227 u8 vmqueue_mode_en_flg;
2426 u8 func; 2228 u8 extra_data_over_sgl_en_flg;
2229 u8 cache_line_alignment_log_size;
2230 u8 enable_dynamic_hc;
2231 u8 max_sges_for_packet;
2232 u8 client_qzone_id;
2233 u8 drop_ip_cs_err_flg;
2234 u8 drop_tcp_cs_err_flg;
2235 u8 drop_ttl0_flg;
2236 u8 drop_udp_cs_err_flg;
2237 u8 inner_vlan_removal_enable_flg;
2238 u8 outer_vlan_removal_enable_flg;
2427 u8 status_block_id; 2239 u8 status_block_id;
2428 __le32 __flags; 2240 u8 rx_sb_index_number;
2241 u8 reserved0[3];
2242 __le16 bd_buff_size;
2243 __le16 sge_buff_size;
2244 __le16 mtu;
2245 struct regpair bd_page_base;
2246 struct regpair sge_page_base;
2247 struct regpair cqe_page_base;
2248 u8 is_leading_rss;
2249 u8 is_approx_mcast;
2250 __le16 max_agg_size;
2251 __le32 reserved2[3];
2252};
2253
2254/*
2255 * client init tx data
2256 */
2257struct client_init_tx_data {
2258 u8 enforce_security_flg;
2259 u8 tx_status_block_id;
2260 u8 tx_sb_index_number;
2261 u8 reserved0;
2262 __le16 mtu;
2263 __le16 reserved1;
2264 struct regpair tx_bd_page_base;
2265 __le32 reserved2[2];
2429}; 2266};
2430 2267
2431/* 2268/*
2432 * cstorm status block, generated by cstorm 2269 * client init ramrod data
2433 */ 2270 */
2434struct cstorm_status_block_c { 2271struct client_init_ramrod_data {
2435 __le16 index_values[HC_CSTORM_SB_NUM_INDICES]; 2272 struct client_init_general_data general;
2436 __le16 status_block_index; 2273 struct client_init_rx_data rx;
2437 u8 func; 2274 struct client_init_tx_data tx;
2438 u8 status_block_id; 2275 struct client_init_fc_data fc;
2439 __le32 __flags;
2440}; 2276};
2441 2277
2278
2442/* 2279/*
2443 * host status block 2280 * The data contain client ID need to the ramrod
2444 */ 2281 */
2445struct host_status_block { 2282struct eth_common_ramrod_data {
2446 struct cstorm_status_block_u u_status_block; 2283 u32 client_id;
2447 struct cstorm_status_block_c c_status_block; 2284 u32 reserved1;
2448}; 2285};
2449 2286
2450 2287
2451/* 2288/*
2452 * The data for RSS setup ramrod 2289 * union for sgl and raw data.
2453 */ 2290 */
2454struct eth_client_setup_ramrod_data { 2291union eth_sgl_or_raw_data {
2455 u32 client_id; 2292 __le16 sgl[8];
2456 u8 is_rdma; 2293 u32 raw_data[4];
2457 u8 is_fcoe;
2458 u16 reserved1;
2459}; 2294};
2460 2295
2461
2462/* 2296/*
2463 * regular eth FP CQE parameters struct 2297 * regular eth FP CQE parameters struct
2464 */ 2298 */
@@ -2476,8 +2310,8 @@ struct eth_fast_path_rx_cqe {
2476#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 2310#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4
2477#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) 2311#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5)
2478#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 2312#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5
2479#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) 2313#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6)
2480#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 2314#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6
2481 u8 status_flags; 2315 u8 status_flags;
2482#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) 2316#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
2483#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 2317#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
@@ -2498,7 +2332,7 @@ struct eth_fast_path_rx_cqe {
2498 __le16 pkt_len; 2332 __le16 pkt_len;
2499 __le16 len_on_bd; 2333 __le16 len_on_bd;
2500 struct parsing_flags pars_flags; 2334 struct parsing_flags pars_flags;
2501 __le16 sgl[8]; 2335 union eth_sgl_or_raw_data sgl_or_raw_data;
2502}; 2336};
2503 2337
2504 2338
@@ -2510,11 +2344,10 @@ struct eth_halt_ramrod_data {
2510 u32 reserved0; 2344 u32 reserved0;
2511}; 2345};
2512 2346
2513
2514/* 2347/*
2515 * The data for statistics query ramrod 2348 * The data for statistics query ramrod
2516 */ 2349 */
2517struct eth_query_ramrod_data { 2350struct common_query_ramrod_data {
2518#if defined(__BIG_ENDIAN) 2351#if defined(__BIG_ENDIAN)
2519 u8 reserved0; 2352 u8 reserved0;
2520 u8 collect_port; 2353 u8 collect_port;
@@ -2597,9 +2430,9 @@ struct spe_hdr {
2597 __le16 type; 2430 __le16 type;
2598#define SPE_HDR_CONN_TYPE (0xFF<<0) 2431#define SPE_HDR_CONN_TYPE (0xFF<<0)
2599#define SPE_HDR_CONN_TYPE_SHIFT 0 2432#define SPE_HDR_CONN_TYPE_SHIFT 0
2600#define SPE_HDR_COMMON_RAMROD (0xFF<<8) 2433#define SPE_HDR_FUNCTION_ID (0xFF<<8)
2601#define SPE_HDR_COMMON_RAMROD_SHIFT 8 2434#define SPE_HDR_FUNCTION_ID_SHIFT 8
2602 __le16 reserved; 2435 __le16 reserved1;
2603}; 2436};
2604 2437
2605/* 2438/*
@@ -2607,12 +2440,10 @@ struct spe_hdr {
2607 */ 2440 */
2608union eth_specific_data { 2441union eth_specific_data {
2609 u8 protocol_data[8]; 2442 u8 protocol_data[8];
2610 struct regpair mac_config_addr; 2443 struct regpair client_init_ramrod_init_data;
2611 struct eth_client_setup_ramrod_data client_setup_ramrod_data;
2612 struct eth_halt_ramrod_data halt_ramrod_data; 2444 struct eth_halt_ramrod_data halt_ramrod_data;
2613 struct regpair leading_cqe_addr;
2614 struct regpair update_data_addr; 2445 struct regpair update_data_addr;
2615 struct eth_query_ramrod_data query_ramrod_data; 2446 struct eth_common_ramrod_data common_ramrod_data;
2616}; 2447};
2617 2448
2618/* 2449/*
@@ -2637,7 +2468,7 @@ struct eth_tx_bds_array {
2637 */ 2468 */
2638struct tstorm_eth_function_common_config { 2469struct tstorm_eth_function_common_config {
2639#if defined(__BIG_ENDIAN) 2470#if defined(__BIG_ENDIAN)
2640 u8 leading_client_id; 2471 u8 reserved1;
2641 u8 rss_result_mask; 2472 u8 rss_result_mask;
2642 u16 config_flags; 2473 u16 config_flags;
2643#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) 2474#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2650,16 +2481,12 @@ struct tstorm_eth_function_common_config {
2650#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 2481#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2651#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) 2482#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2652#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 2483#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2653#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7) 2484#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
2654#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7 2485#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
2655#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8) 2486#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
2656#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8 2487#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
2657#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9) 2488#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
2658#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9 2489#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
2659#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2660#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2661#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2662#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2663#elif defined(__LITTLE_ENDIAN) 2490#elif defined(__LITTLE_ENDIAN)
2664 u16 config_flags; 2491 u16 config_flags;
2665#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) 2492#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
@@ -2672,18 +2499,14 @@ struct tstorm_eth_function_common_config {
2672#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 2499#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
2673#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) 2500#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
2674#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 2501#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
2675#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE (0x1<<7) 2502#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7)
2676#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_DEFAULT_ENABLE_SHIFT 7 2503#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7
2677#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM (0x1<<8) 2504#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8)
2678#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_IN_CAM_SHIFT 8 2505#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8
2679#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM (0x1<<9) 2506#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9)
2680#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM_SHIFT 9 2507#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9
2681#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<10)
2682#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 10
2683#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x1F<<11)
2684#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 11
2685 u8 rss_result_mask; 2508 u8 rss_result_mask;
2686 u8 leading_client_id; 2509 u8 reserved1;
2687#endif 2510#endif
2688 u16 vlan_id[2]; 2511 u16 vlan_id[2];
2689}; 2512};
@@ -2731,90 +2554,42 @@ struct mac_configuration_hdr {
2731 u8 length; 2554 u8 length;
2732 u8 offset; 2555 u8 offset;
2733 u16 client_id; 2556 u16 client_id;
2734 u32 reserved1; 2557 u16 echo;
2735}; 2558 u16 reserved1;
2736
2737/*
2738 * MAC address in list for ramrod
2739 */
2740struct tstorm_cam_entry {
2741 __le16 lsb_mac_addr;
2742 __le16 middle_mac_addr;
2743 __le16 msb_mac_addr;
2744 __le16 flags;
2745#define TSTORM_CAM_ENTRY_PORT_ID (0x1<<0)
2746#define TSTORM_CAM_ENTRY_PORT_ID_SHIFT 0
2747#define TSTORM_CAM_ENTRY_RSRVVAL0 (0x7<<1)
2748#define TSTORM_CAM_ENTRY_RSRVVAL0_SHIFT 1
2749#define TSTORM_CAM_ENTRY_RESERVED0 (0xFFF<<4)
2750#define TSTORM_CAM_ENTRY_RESERVED0_SHIFT 4
2751};
2752
2753/*
2754 * MAC filtering: CAM target table entry
2755 */
2756struct tstorm_cam_target_table_entry {
2757 u8 flags;
2758#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST (0x1<<0)
2759#define TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST_SHIFT 0
2760#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<1)
2761#define TSTORM_CAM_TARGET_TABLE_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 1
2762#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE (0x1<<2)
2763#define TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE_SHIFT 2
2764#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC (0x1<<3)
2765#define TSTORM_CAM_TARGET_TABLE_ENTRY_RDMA_MAC_SHIFT 3
2766#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0 (0xF<<4)
2767#define TSTORM_CAM_TARGET_TABLE_ENTRY_RESERVED0_SHIFT 4
2768 u8 reserved1;
2769 u16 vlan_id;
2770 u32 clients_bit_vector;
2771}; 2559};
2772 2560
2773/* 2561/*
2774 * MAC address in list for ramrod 2562 * MAC address in list for ramrod
2775 */ 2563 */
2776struct mac_configuration_entry { 2564struct mac_configuration_entry {
2777 struct tstorm_cam_entry cam_entry;
2778 struct tstorm_cam_target_table_entry target_table_entry;
2779};
2780
2781/*
2782 * MAC filtering configuration command
2783 */
2784struct mac_configuration_cmd {
2785 struct mac_configuration_hdr hdr;
2786 struct mac_configuration_entry config_table[64];
2787};
2788
2789
2790/*
2791 * MAC address in list for ramrod
2792 */
2793struct mac_configuration_entry_e1h {
2794 __le16 lsb_mac_addr; 2565 __le16 lsb_mac_addr;
2795 __le16 middle_mac_addr; 2566 __le16 middle_mac_addr;
2796 __le16 msb_mac_addr; 2567 __le16 msb_mac_addr;
2797 __le16 vlan_id; 2568 __le16 vlan_id;
2798 __le16 e1hov_id; 2569 u8 pf_id;
2799 u8 reserved0;
2800 u8 flags; 2570 u8 flags;
2801#define MAC_CONFIGURATION_ENTRY_E1H_PORT (0x1<<0) 2571#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
2802#define MAC_CONFIGURATION_ENTRY_E1H_PORT_SHIFT 0 2572#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
2803#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE (0x1<<1) 2573#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
2804#define MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE_SHIFT 1 2574#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
2805#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC (0x1<<2) 2575#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
2806#define MAC_CONFIGURATION_ENTRY_E1H_RDMA_MAC_SHIFT 2 2576#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
2807#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1 (0x1F<<3) 2577#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
2808#define MAC_CONFIGURATION_ENTRY_E1H_RESERVED1_SHIFT 3 2578#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
2579#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
2580#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
2581#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
2582#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
2583 u16 reserved0;
2809 u32 clients_bit_vector; 2584 u32 clients_bit_vector;
2810}; 2585};
2811 2586
2812/* 2587/*
2813 * MAC filtering configuration command 2588 * MAC filtering configuration command
2814 */ 2589 */
2815struct mac_configuration_cmd_e1h { 2590struct mac_configuration_cmd {
2816 struct mac_configuration_hdr hdr; 2591 struct mac_configuration_hdr hdr;
2817 struct mac_configuration_entry_e1h config_table[32]; 2592 struct mac_configuration_entry config_table[64];
2818}; 2593};
2819 2594
2820 2595
@@ -2827,65 +2602,6 @@ struct tstorm_eth_approximate_match_multicast_filtering {
2827 2602
2828 2603
2829/* 2604/*
2830 * Configuration parameters per client in Tstorm
2831 */
2832struct tstorm_eth_client_config {
2833#if defined(__BIG_ENDIAN)
2834 u8 reserved0;
2835 u8 statistics_counter_id;
2836 u16 mtu;
2837#elif defined(__LITTLE_ENDIAN)
2838 u16 mtu;
2839 u8 statistics_counter_id;
2840 u8 reserved0;
2841#endif
2842#if defined(__BIG_ENDIAN)
2843 u16 drop_flags;
2844#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2845#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2846#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2847#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2848#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2849#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2850#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2851#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2852#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2853#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2854 u16 config_flags;
2855#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2856#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2857#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2858#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2859#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2860#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2861#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2862#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2863#elif defined(__LITTLE_ENDIAN)
2864 u16 config_flags;
2865#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE (0x1<<0)
2866#define TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE_SHIFT 0
2867#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE (0x1<<1)
2868#define TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE_SHIFT 1
2869#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE (0x1<<2)
2870#define TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE_SHIFT 2
2871#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1 (0x1FFF<<3)
2872#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED1_SHIFT 3
2873 u16 drop_flags;
2874#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR (0x1<<0)
2875#define TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR_SHIFT 0
2876#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR (0x1<<1)
2877#define TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR_SHIFT 1
2878#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0 (0x1<<2)
2879#define TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0_SHIFT 2
2880#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR (0x1<<3)
2881#define TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR_SHIFT 3
2882#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2 (0xFFF<<4)
2883#define __TSTORM_ETH_CLIENT_CONFIG_RESERVED2_SHIFT 4
2884#endif
2885};
2886
2887
2888/*
2889 * MAC filtering configuration parameters per port in Tstorm 2605 * MAC filtering configuration parameters per port in Tstorm
2890 */ 2606 */
2891struct tstorm_eth_mac_filter_config { 2607struct tstorm_eth_mac_filter_config {
@@ -2895,8 +2611,8 @@ struct tstorm_eth_mac_filter_config {
2895 u32 mcast_accept_all; 2611 u32 mcast_accept_all;
2896 u32 bcast_drop_all; 2612 u32 bcast_drop_all;
2897 u32 bcast_accept_all; 2613 u32 bcast_accept_all;
2898 u32 strict_vlan;
2899 u32 vlan_filter[2]; 2614 u32 vlan_filter[2];
2615 u32 unmatched_unicast;
2900 u32 reserved; 2616 u32 reserved;
2901}; 2617};
2902 2618
@@ -2919,41 +2635,6 @@ struct tstorm_eth_tpa_exist {
2919 2635
2920 2636
2921/* 2637/*
2922 * rx rings pause data for E1h only
2923 */
2924struct ustorm_eth_rx_pause_data_e1h {
2925#if defined(__BIG_ENDIAN)
2926 u16 bd_thr_low;
2927 u16 cqe_thr_low;
2928#elif defined(__LITTLE_ENDIAN)
2929 u16 cqe_thr_low;
2930 u16 bd_thr_low;
2931#endif
2932#if defined(__BIG_ENDIAN)
2933 u16 cos;
2934 u16 sge_thr_low;
2935#elif defined(__LITTLE_ENDIAN)
2936 u16 sge_thr_low;
2937 u16 cos;
2938#endif
2939#if defined(__BIG_ENDIAN)
2940 u16 bd_thr_high;
2941 u16 cqe_thr_high;
2942#elif defined(__LITTLE_ENDIAN)
2943 u16 cqe_thr_high;
2944 u16 bd_thr_high;
2945#endif
2946#if defined(__BIG_ENDIAN)
2947 u16 reserved0;
2948 u16 sge_thr_high;
2949#elif defined(__LITTLE_ENDIAN)
2950 u16 sge_thr_high;
2951 u16 reserved0;
2952#endif
2953};
2954
2955
2956/*
2957 * Three RX producers for ETH 2638 * Three RX producers for ETH
2958 */ 2639 */
2959struct ustorm_eth_rx_producers { 2640struct ustorm_eth_rx_producers {
@@ -2975,6 +2656,18 @@ struct ustorm_eth_rx_producers {
2975 2656
2976 2657
2977/* 2658/*
2659 * cfc delete event data
2660 */
2661struct cfc_del_event_data {
2662 u32 cid;
2663 u8 error;
2664 u8 reserved0;
2665 u16 reserved1;
2666 u32 reserved2;
2667};
2668
2669
2670/*
2978 * per-port SAFC demo variables 2671 * per-port SAFC demo variables
2979 */ 2672 */
2980struct cmng_flags_per_port { 2673struct cmng_flags_per_port {
@@ -2990,8 +2683,10 @@ struct cmng_flags_per_port {
2990#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3 2683#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3
2991#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4) 2684#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4)
2992#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4 2685#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4
2993#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x7FFFFFF<<5) 2686#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5)
2994#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 5 2687#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5
2688#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6)
2689#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6
2995}; 2690};
2996 2691
2997 2692
@@ -3025,30 +2720,92 @@ struct safc_struct_per_port {
3025 u8 __reserved0; 2720 u8 __reserved0;
3026 u16 __reserved1; 2721 u16 __reserved1;
3027#endif 2722#endif
2723 u8 cos_to_traffic_types[MAX_COS_NUMBER];
2724 u32 __reserved2;
3028 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; 2725 u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
3029}; 2726};
3030 2727
3031/* 2728/*
2729 * per-port PFC variables
2730 */
2731struct pfc_struct_per_port {
2732 u8 priority_to_traffic_types[MAX_PFC_PRIORITIES];
2733#if defined(__BIG_ENDIAN)
2734 u16 pfc_pause_quanta_in_nanosec;
2735 u8 __reserved0;
2736 u8 priority_non_pausable_mask;
2737#elif defined(__LITTLE_ENDIAN)
2738 u8 priority_non_pausable_mask;
2739 u8 __reserved0;
2740 u16 pfc_pause_quanta_in_nanosec;
2741#endif
2742};
2743
2744/*
2745 * Priority and cos
2746 */
2747struct priority_cos {
2748#if defined(__BIG_ENDIAN)
2749 u16 reserved1;
2750 u8 cos;
2751 u8 priority;
2752#elif defined(__LITTLE_ENDIAN)
2753 u8 priority;
2754 u8 cos;
2755 u16 reserved1;
2756#endif
2757 u32 reserved2;
2758};
2759
2760/*
3032 * Per-port congestion management variables 2761 * Per-port congestion management variables
3033 */ 2762 */
3034struct cmng_struct_per_port { 2763struct cmng_struct_per_port {
3035 struct rate_shaping_vars_per_port rs_vars; 2764 struct rate_shaping_vars_per_port rs_vars;
3036 struct fairness_vars_per_port fair_vars; 2765 struct fairness_vars_per_port fair_vars;
3037 struct safc_struct_per_port safc_vars; 2766 struct safc_struct_per_port safc_vars;
2767 struct pfc_struct_per_port pfc_vars;
2768#if defined(__BIG_ENDIAN)
2769 u16 __reserved1;
2770 u8 dcb_enabled;
2771 u8 llfc_mode;
2772#elif defined(__LITTLE_ENDIAN)
2773 u8 llfc_mode;
2774 u8 dcb_enabled;
2775 u16 __reserved1;
2776#endif
2777 struct priority_cos
2778 traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
3038 struct cmng_flags_per_port flags; 2779 struct cmng_flags_per_port flags;
3039}; 2780};
3040 2781
3041 2782
2783
2784/*
2785 * Dynamic HC counters set by the driver
2786 */
2787struct hc_dynamic_drv_counter {
2788 u32 val[HC_SB_MAX_DYNAMIC_INDICES];
2789};
2790
2791/*
2792 * zone A per-queue data
2793 */
2794struct cstorm_queue_zone_data {
2795 struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
2796 struct regpair reserved[2];
2797};
2798
3042/* 2799/*
3043 * Dynamic host coalescing init parameters 2800 * Dynamic host coalescing init parameters
3044 */ 2801 */
3045struct dynamic_hc_config { 2802struct dynamic_hc_config {
3046 u32 threshold[3]; 2803 u32 threshold[3];
3047 u8 shift_per_protocol[HC_USTORM_SB_NUM_INDICES]; 2804 u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
3048 u8 hc_timeout0[HC_USTORM_SB_NUM_INDICES]; 2805 u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
3049 u8 hc_timeout1[HC_USTORM_SB_NUM_INDICES]; 2806 u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
3050 u8 hc_timeout2[HC_USTORM_SB_NUM_INDICES]; 2807 u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
3051 u8 hc_timeout3[HC_USTORM_SB_NUM_INDICES]; 2808 u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
3052}; 2809};
3053 2810
3054 2811
@@ -3072,7 +2829,7 @@ struct xstorm_per_client_stats {
3072 * Common statistics collected by the Xstorm (per port) 2829 * Common statistics collected by the Xstorm (per port)
3073 */ 2830 */
3074struct xstorm_common_stats { 2831struct xstorm_common_stats {
3075 struct xstorm_per_client_stats client_statistics[MAX_X_STAT_COUNTER_ID]; 2832 struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3076}; 2833};
3077 2834
3078/* 2835/*
@@ -3109,7 +2866,7 @@ struct tstorm_per_client_stats {
3109 */ 2866 */
3110struct tstorm_common_stats { 2867struct tstorm_common_stats {
3111 struct tstorm_per_port_stats port_statistics; 2868 struct tstorm_per_port_stats port_statistics;
3112 struct tstorm_per_client_stats client_statistics[MAX_T_STAT_COUNTER_ID]; 2869 struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3113}; 2870};
3114 2871
3115/* 2872/*
@@ -3130,7 +2887,7 @@ struct ustorm_per_client_stats {
3130 * Protocol-common statistics collected by the Ustorm 2887 * Protocol-common statistics collected by the Ustorm
3131 */ 2888 */
3132struct ustorm_common_stats { 2889struct ustorm_common_stats {
3133 struct ustorm_per_client_stats client_statistics[MAX_U_STAT_COUNTER_ID]; 2890 struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID];
3134}; 2891};
3135 2892
3136/* 2893/*
@@ -3144,6 +2901,70 @@ struct eth_stats_query {
3144 2901
3145 2902
3146/* 2903/*
2904 * set mac event data
2905 */
2906struct set_mac_event_data {
2907 u16 echo;
2908 u16 reserved0;
2909 u32 reserved1;
2910 u32 reserved2;
2911};
2912
2913/*
2914 * union for all event ring message types
2915 */
2916union event_data {
2917 struct set_mac_event_data set_mac_event;
2918 struct cfc_del_event_data cfc_del_event;
2919};
2920
2921
2922/*
2923 * per PF event ring data
2924 */
2925struct event_ring_data {
2926 struct regpair base_addr;
2927#if defined(__BIG_ENDIAN)
2928 u8 index_id;
2929 u8 sb_id;
2930 u16 producer;
2931#elif defined(__LITTLE_ENDIAN)
2932 u16 producer;
2933 u8 sb_id;
2934 u8 index_id;
2935#endif
2936 u32 reserved0;
2937};
2938
2939
2940/*
2941 * event ring message element (each element is 128 bits)
2942 */
2943struct event_ring_msg {
2944 u8 opcode;
2945 u8 reserved0;
2946 u16 reserved1;
2947 union event_data data;
2948};
2949
2950/*
2951 * event ring next page element (128 bits)
2952 */
2953struct event_ring_next {
2954 struct regpair addr;
2955 u32 reserved[2];
2956};
2957
2958/*
2959 * union for event ring element types (each element is 128 bits)
2960 */
2961union event_ring_elem {
2962 struct event_ring_msg message;
2963 struct event_ring_next next_page;
2964};
2965
2966
2967/*
3147 * per-vnic fairness variables 2968 * per-vnic fairness variables
3148 */ 2969 */
3149struct fairness_vars_per_vn { 2970struct fairness_vars_per_vn {
@@ -3182,6 +3003,137 @@ struct fw_version {
3182 3003
3183 3004
3184/* 3005/*
3006 * Dynamic Host-Coalescing - Driver(host) counters
3007 */
3008struct hc_dynamic_sb_drv_counters {
3009 u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
3010};
3011
3012
3013/*
3014 * 2 bytes. configuration/state parameters for a single protocol index
3015 */
3016struct hc_index_data {
3017#if defined(__BIG_ENDIAN)
3018 u8 flags;
3019#define HC_INDEX_DATA_SM_ID (0x1<<0)
3020#define HC_INDEX_DATA_SM_ID_SHIFT 0
3021#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
3022#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
3023#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
3024#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
3025#define HC_INDEX_DATA_RESERVE (0x1F<<3)
3026#define HC_INDEX_DATA_RESERVE_SHIFT 3
3027 u8 timeout;
3028#elif defined(__LITTLE_ENDIAN)
3029 u8 timeout;
3030 u8 flags;
3031#define HC_INDEX_DATA_SM_ID (0x1<<0)
3032#define HC_INDEX_DATA_SM_ID_SHIFT 0
3033#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
3034#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
3035#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
3036#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
3037#define HC_INDEX_DATA_RESERVE (0x1F<<3)
3038#define HC_INDEX_DATA_RESERVE_SHIFT 3
3039#endif
3040};
3041
3042
3043/*
3044 * HC state-machine
3045 */
3046struct hc_status_block_sm {
3047#if defined(__BIG_ENDIAN)
3048 u8 igu_seg_id;
3049 u8 igu_sb_id;
3050 u8 timer_value;
3051 u8 __flags;
3052#elif defined(__LITTLE_ENDIAN)
3053 u8 __flags;
3054 u8 timer_value;
3055 u8 igu_sb_id;
3056 u8 igu_seg_id;
3057#endif
3058 u32 time_to_expire;
3059};
3060
3061/*
3062 * hold PCI identification variables- used in various places in firmware
3063 */
3064struct pci_entity {
3065#if defined(__BIG_ENDIAN)
3066 u8 vf_valid;
3067 u8 vf_id;
3068 u8 vnic_id;
3069 u8 pf_id;
3070#elif defined(__LITTLE_ENDIAN)
3071 u8 pf_id;
3072 u8 vnic_id;
3073 u8 vf_id;
3074 u8 vf_valid;
3075#endif
3076};
3077
3078/*
3079 * The fast-path status block meta-data, common to all chips
3080 */
3081struct hc_sb_data {
3082 struct regpair host_sb_addr;
3083 struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
3084 struct pci_entity p_func;
3085#if defined(__BIG_ENDIAN)
3086 u8 rsrv0;
3087 u8 dhc_qzone_id;
3088 u8 __dynamic_hc_level;
3089 u8 same_igu_sb_1b;
3090#elif defined(__LITTLE_ENDIAN)
3091 u8 same_igu_sb_1b;
3092 u8 __dynamic_hc_level;
3093 u8 dhc_qzone_id;
3094 u8 rsrv0;
3095#endif
3096 struct regpair rsrv1[2];
3097};
3098
3099
3100/*
3101 * The fast-path status block meta-data
3102 */
3103struct hc_sp_status_block_data {
3104 struct regpair host_sb_addr;
3105#if defined(__BIG_ENDIAN)
3106 u16 rsrv;
3107 u8 igu_seg_id;
3108 u8 igu_sb_id;
3109#elif defined(__LITTLE_ENDIAN)
3110 u8 igu_sb_id;
3111 u8 igu_seg_id;
3112 u16 rsrv;
3113#endif
3114 struct pci_entity p_func;
3115};
3116
3117
3118/*
3119 * The fast-path status block meta-data
3120 */
3121struct hc_status_block_data_e1x {
3122 struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
3123 struct hc_sb_data common;
3124};
3125
3126
3127/*
3128 * The fast-path status block meta-data
3129 */
3130struct hc_status_block_data_e2 {
3131 struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
3132 struct hc_sb_data common;
3133};
3134
3135
3136/*
3185 * FW version stored in first line of pram 3137 * FW version stored in first line of pram
3186 */ 3138 */
3187struct pram_fw_version { 3139struct pram_fw_version {
@@ -3204,11 +3156,21 @@ struct pram_fw_version {
3204 3156
3205 3157
3206/* 3158/*
3159 * Ethernet slow path element
3160 */
3161union protocol_common_specific_data {
3162 u8 protocol_data[8];
3163 struct regpair phy_address;
3164 struct regpair mac_config_addr;
3165 struct common_query_ramrod_data query_ramrod_data;
3166};
3167
3168/*
3207 * The send queue element 3169 * The send queue element
3208 */ 3170 */
3209struct protocol_common_spe { 3171struct protocol_common_spe {
3210 struct spe_hdr hdr; 3172 struct spe_hdr hdr;
3211 struct regpair phy_address; 3173 union protocol_common_specific_data data;
3212}; 3174};
3213 3175
3214 3176
@@ -3241,7 +3203,7 @@ struct rate_shaping_vars_per_vn {
3241 */ 3203 */
3242struct slow_path_element { 3204struct slow_path_element {
3243 struct spe_hdr hdr; 3205 struct spe_hdr hdr;
3244 u8 protocol_data[8]; 3206 struct regpair protocol_data;
3245}; 3207};
3246 3208
3247 3209
@@ -3254,3 +3216,97 @@ struct stats_indication_flags {
3254}; 3216};
3255 3217
3256 3218
3219/*
3220 * per-port PFC variables
3221 */
3222struct storm_pfc_struct_per_port {
3223#if defined(__BIG_ENDIAN)
3224 u16 mid_mac_addr;
3225 u16 msb_mac_addr;
3226#elif defined(__LITTLE_ENDIAN)
3227 u16 msb_mac_addr;
3228 u16 mid_mac_addr;
3229#endif
3230#if defined(__BIG_ENDIAN)
3231 u16 pfc_pause_quanta_in_nanosec;
3232 u16 lsb_mac_addr;
3233#elif defined(__LITTLE_ENDIAN)
3234 u16 lsb_mac_addr;
3235 u16 pfc_pause_quanta_in_nanosec;
3236#endif
3237};
3238
3239/*
3240 * Per-port congestion management variables
3241 */
3242struct storm_cmng_struct_per_port {
3243 struct storm_pfc_struct_per_port pfc_vars;
3244};
3245
3246
3247/*
3248 * zone A per-queue data
3249 */
3250struct tstorm_queue_zone_data {
3251 struct regpair reserved[4];
3252};
3253
3254
3255/*
3256 * zone B per-VF data
3257 */
3258struct tstorm_vf_zone_data {
3259 struct regpair reserved;
3260};
3261
3262
3263/*
3264 * zone A per-queue data
3265 */
3266struct ustorm_queue_zone_data {
3267 struct ustorm_eth_rx_producers eth_rx_producers;
3268 struct regpair reserved[3];
3269};
3270
3271
3272/*
3273 * zone B per-VF data
3274 */
3275struct ustorm_vf_zone_data {
3276 struct regpair reserved;
3277};
3278
3279
3280/*
3281 * data per VF-PF channel
3282 */
3283struct vf_pf_channel_data {
3284#if defined(__BIG_ENDIAN)
3285 u16 reserved0;
3286 u8 valid;
3287 u8 state;
3288#elif defined(__LITTLE_ENDIAN)
3289 u8 state;
3290 u8 valid;
3291 u16 reserved0;
3292#endif
3293 u32 reserved1;
3294};
3295
3296
3297/*
3298 * zone A per-queue data
3299 */
3300struct xstorm_queue_zone_data {
3301 struct regpair reserved[4];
3302};
3303
3304
3305/*
3306 * zone B per-VF data
3307 */
3308struct xstorm_vf_zone_data {
3309 struct regpair reserved;
3310};
3311
3312#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 65b26cbfe3e7..a9d54874a559 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -97,6 +97,9 @@
97#define MISC_AEU_BLOCK 35 97#define MISC_AEU_BLOCK 35
98#define PGLUE_B_BLOCK 36 98#define PGLUE_B_BLOCK 36
99#define IGU_BLOCK 37 99#define IGU_BLOCK 37
100#define ATC_BLOCK 38
101#define QM_4PORT_BLOCK 39
102#define XSEM_4PORT_BLOCK 40
100 103
101 104
102/* Returns the index of start or end of a specific block stage in ops array*/ 105/* Returns the index of start or end of a specific block stage in ops array*/
@@ -148,5 +151,46 @@ union init_op {
148 struct raw_op raw; 151 struct raw_op raw;
149}; 152};
150 153
154#define INITOP_SET 0 /* set the HW directly */
155#define INITOP_CLEAR 1 /* clear the HW directly */
156#define INITOP_INIT 2 /* set the init-value array */
157
158/****************************************************************************
159* ILT management
160****************************************************************************/
161struct ilt_line {
162 dma_addr_t page_mapping;
163 void *page;
164 u32 size;
165};
166
167struct ilt_client_info {
168 u32 page_size;
169 u16 start;
170 u16 end;
171 u16 client_num;
172 u16 flags;
173#define ILT_CLIENT_SKIP_INIT 0x1
174#define ILT_CLIENT_SKIP_MEM 0x2
175};
176
177struct bnx2x_ilt {
178 u32 start_line;
179 struct ilt_line *lines;
180 struct ilt_client_info clients[4];
181#define ILT_CLIENT_CDU 0
182#define ILT_CLIENT_QM 1
183#define ILT_CLIENT_SRC 2
184#define ILT_CLIENT_TM 3
185};
186
187/****************************************************************************
188* SRC configuration
189****************************************************************************/
190struct src_ent {
191 u8 opaque[56];
192 u64 next;
193};
194
151#endif /* BNX2X_INIT_H */ 195#endif /* BNX2X_INIT_H */
152 196
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 2b1363a6fe78..e65de784182c 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -151,6 +151,15 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
151 bnx2x_init_ind_wr(bp, addr, data, len); 151 bnx2x_init_ind_wr(bp, addr, data, len);
152} 152}
153 153
154static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
155{
156 u32 wb_write[2];
157
158 wb_write[0] = val_lo;
159 wb_write[1] = val_hi;
160 REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
161}
162
154static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) 163static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
155{ 164{
156 const u8 *data = NULL; 165 const u8 *data = NULL;
@@ -477,18 +486,30 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
477 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); 486 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
478 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); 487 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
479 488
480 if (r_order == MAX_RD_ORD) 489 if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
481 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); 490 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
482 491
483 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); 492 if (CHIP_IS_E2(bp))
493 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
494 else
495 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
484 496
485 if (CHIP_IS_E1H(bp)) { 497 if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
486 /* MPS w_order optimal TH presently TH 498 /* MPS w_order optimal TH presently TH
487 * 128 0 0 2 499 * 128 0 0 2
488 * 256 1 1 3 500 * 256 1 1 3
489 * >=512 2 2 3 501 * >=512 2 2 3
490 */ 502 */
491 val = ((w_order == 0) ? 2 : 3); 503 /* DMAE is special */
504 if (CHIP_IS_E2(bp)) {
505 /* E2 can use optimal TH */
506 val = w_order;
507 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
508 } else {
509 val = ((w_order == 0) ? 2 : 3);
510 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
511 }
512
492 REG_WR(bp, PXP2_REG_WR_HC_MPS, val); 513 REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
493 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val); 514 REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
494 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val); 515 REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
@@ -498,9 +519,344 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
498 REG_WR(bp, PXP2_REG_WR_TM_MPS, val); 519 REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
499 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val); 520 REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
500 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val); 521 REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
501 REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2); /* DMAE is special */
502 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val); 522 REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
503 } 523 }
524
525 /* Validate number of tags suppoted by device */
526#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
527 val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
528 val &= 0xFF;
529 if (val <= 0x20)
530 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
531}
532
533/****************************************************************************
534* ILT management
535****************************************************************************/
536/*
537 * This codes hides the low level HW interaction for ILT management and
538 * configuration. The API consists of a shadow ILT table which is set by the
539 * driver and a set of routines to use it to configure the HW.
540 *
541 */
542
543/* ILT HW init operations */
544
545/* ILT memory management operations */
546#define ILT_MEMOP_ALLOC 0
547#define ILT_MEMOP_FREE 1
548
549/* the phys address is shifted right 12 bits and has an added
550 * 1=valid bit added to the 53rd bit
551 * then since this is a wide register(TM)
552 * we split it into two 32 bit writes
553 */
554#define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
555#define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
556#define ILT_RANGE(f, l) (((l) << 10) | f)
557
558static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
559 u32 size, u8 memop)
560{
561 if (memop == ILT_MEMOP_FREE) {
562 BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
563 return 0;
564 }
565 BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
566 if (!line->page)
567 return -1;
568 line->size = size;
569 return 0;
570}
571
572
573static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
574{
575 int i, rc;
576 struct bnx2x_ilt *ilt = BP_ILT(bp);
577 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
578
579 if (!ilt || !ilt->lines)
580 return -1;
581
582 if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
583 return 0;
584
585 for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
586 rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
587 ilt_cli->page_size, memop);
588 }
589 return rc;
590}
591
592int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
593{
594 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
595 if (!rc)
596 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
597 if (!rc)
598 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
599 if (!rc)
600 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
601
602 return rc;
603}
604
605static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
606 dma_addr_t page_mapping)
607{
608 u32 reg;
609
610 if (CHIP_IS_E1(bp))
611 reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
612 else
613 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
614
615 bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
616}
617
618static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
619 int idx, u8 initop)
620{
621 dma_addr_t null_mapping;
622 int abs_idx = ilt->start_line + idx;
623
624
625 switch (initop) {
626 case INITOP_INIT:
627 /* set in the init-value array */
628 case INITOP_SET:
629 bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
630 break;
631 case INITOP_CLEAR:
632 null_mapping = 0;
633 bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
634 break;
635 }
636}
637
638void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
639 struct ilt_client_info *ilt_cli,
640 u32 ilt_start, u8 initop)
641{
642 u32 start_reg = 0;
643 u32 end_reg = 0;
644
645 /* The boundary is either SET or INIT,
646 CLEAR => SET and for now SET ~~ INIT */
647
648 /* find the appropriate regs */
649 if (CHIP_IS_E1(bp)) {
650 switch (ilt_cli->client_num) {
651 case ILT_CLIENT_CDU:
652 start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
653 break;
654 case ILT_CLIENT_QM:
655 start_reg = PXP2_REG_PSWRQ_QM0_L2P;
656 break;
657 case ILT_CLIENT_SRC:
658 start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
659 break;
660 case ILT_CLIENT_TM:
661 start_reg = PXP2_REG_PSWRQ_TM0_L2P;
662 break;
663 }
664 REG_WR(bp, start_reg + BP_FUNC(bp)*4,
665 ILT_RANGE((ilt_start + ilt_cli->start),
666 (ilt_start + ilt_cli->end)));
667 } else {
668 switch (ilt_cli->client_num) {
669 case ILT_CLIENT_CDU:
670 start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
671 end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
672 break;
673 case ILT_CLIENT_QM:
674 start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
675 end_reg = PXP2_REG_RQ_QM_LAST_ILT;
676 break;
677 case ILT_CLIENT_SRC:
678 start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
679 end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
680 break;
681 case ILT_CLIENT_TM:
682 start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
683 end_reg = PXP2_REG_RQ_TM_LAST_ILT;
684 break;
685 }
686 REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
687 REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
688 }
689}
690
691void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
692 struct ilt_client_info *ilt_cli, u8 initop)
693{
694 int i;
695
696 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
697 return;
698
699 for (i = ilt_cli->start; i <= ilt_cli->end; i++)
700 bnx2x_ilt_line_init_op(bp, ilt, i, initop);
701
702 /* init/clear the ILT boundries */
703 bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
704}
705
706void bnx2x_ilt_client_init_op(struct bnx2x *bp,
707 struct ilt_client_info *ilt_cli, u8 initop)
708{
709 struct bnx2x_ilt *ilt = BP_ILT(bp);
710
711 bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
712}
713
714static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
715 int cli_num, u8 initop)
716{
717 struct bnx2x_ilt *ilt = BP_ILT(bp);
718 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
719
720 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
721}
722
723void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
724{
725 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
726 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
727 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
728 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
729}
730
731static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
732 u32 psz_reg, u8 initop)
733{
734 struct bnx2x_ilt *ilt = BP_ILT(bp);
735 struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
736
737 if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
738 return;
739
740 switch (initop) {
741 case INITOP_INIT:
742 /* set in the init-value array */
743 case INITOP_SET:
744 REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
745 break;
746 case INITOP_CLEAR:
747 break;
748 }
749}
750
751/*
752 * called during init common stage, ilt clients should be initialized
753 * prioir to calling this function
754 */
755void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
756{
757 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
758 PXP2_REG_RQ_CDU_P_SIZE, initop);
759 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
760 PXP2_REG_RQ_QM_P_SIZE, initop);
761 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
762 PXP2_REG_RQ_SRC_P_SIZE, initop);
763 bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
764 PXP2_REG_RQ_TM_P_SIZE, initop);
765}
766
767/****************************************************************************
768* QM initializations
769****************************************************************************/
770#define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
771#define QM_INIT_MIN_CID_COUNT 31
772#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
773
774/* called during init port stage */
775void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
776 u8 initop)
777{
778 int port = BP_PORT(bp);
779
780 if (QM_INIT(qm_cid_count)) {
781 switch (initop) {
782 case INITOP_INIT:
783 /* set in the init-value array */
784 case INITOP_SET:
785 REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
786 qm_cid_count/16 - 1);
787 break;
788 case INITOP_CLEAR:
789 break;
790 }
791 }
792}
793
794static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
795{
796 int i;
797 u32 wb_data[2];
798
799 wb_data[0] = wb_data[1] = 0;
800
801 for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
802 REG_WR(bp, QM_REG_BASEADDR + i*4,
803 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
804 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
805 wb_data, 2);
806
807 if (CHIP_IS_E1H(bp)) {
808 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
809 qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
810 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
811 wb_data, 2);
812 }
813 }
814}
815
816/* called during init common stage */
817void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
818 u8 initop)
819{
820 if (!QM_INIT(qm_cid_count))
821 return;
822
823 switch (initop) {
824 case INITOP_INIT:
825 /* set in the init-value array */
826 case INITOP_SET:
827 bnx2x_qm_set_ptr_table(bp, qm_cid_count);
828 break;
829 case INITOP_CLEAR:
830 break;
831 }
832}
833
834/****************************************************************************
835* SRC initializations
836****************************************************************************/
837
838/* called during init func stage */
839void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
840 dma_addr_t t2_mapping, int src_cid_count)
841{
842 int i;
843 int port = BP_PORT(bp);
844
845 /* Initialize T2 */
846 for (i = 0; i < src_cid_count-1; i++)
847 t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
848
849 /* tell the searcher where the T2 table is */
850 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
851
852 bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
853 U64_LO(t2_mapping), U64_HI(t2_mapping));
854
855 bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
856 U64_LO((u64)t2_mapping +
857 (src_cid_count-1) * sizeof(struct src_ent)),
858 U64_HI((u64)t2_mapping +
859 (src_cid_count-1) * sizeof(struct src_ent)));
504} 860}
505 861
506#endif /* BNX2X_INIT_OPS_H */ 862#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index a07a3a6abd40..3e99bf9c42b9 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -28,7 +28,7 @@
28 28
29/********************************************************/ 29/********************************************************/
30#define ETH_HLEN 14 30#define ETH_HLEN 14
31#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ 31#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
32#define ETH_MIN_PACKET_SIZE 60 32#define ETH_MIN_PACKET_SIZE 60
33#define ETH_MAX_PACKET_SIZE 1500 33#define ETH_MAX_PACKET_SIZE 1500
34#define ETH_MAX_JUMBO_PACKET_SIZE 9600 34#define ETH_MAX_JUMBO_PACKET_SIZE 9600
@@ -377,9 +377,60 @@ static u8 bnx2x_emac_enable(struct link_params *params,
377 return 0; 377 return 0;
378} 378}
379 379
380static void bnx2x_update_bmac2(struct link_params *params,
381 struct link_vars *vars,
382 u8 is_lb)
383{
384 /*
385 * Set rx control: Strip CRC and enable BigMAC to relay
386 * control packets to the system as well
387 */
388 u32 wb_data[2];
389 struct bnx2x *bp = params->bp;
390 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
391 NIG_REG_INGRESS_BMAC0_MEM;
392 u32 val = 0x14;
393
394 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
395 /* Enable BigMAC to react on received Pause packets */
396 val |= (1<<5);
397 wb_data[0] = val;
398 wb_data[1] = 0;
399 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
400 wb_data, 2);
401 udelay(30);
402
403 /* Tx control */
404 val = 0xc0;
405 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
406 val |= 0x800000;
407 wb_data[0] = val;
408 wb_data[1] = 0;
409 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
410 wb_data, 2);
411
412 val = 0x8000;
413 wb_data[0] = val;
414 wb_data[1] = 0;
415 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
416 wb_data, 2);
380 417
418 /* mac control */
419 val = 0x3; /* Enable RX and TX */
420 if (is_lb) {
421 val |= 0x4; /* Local loopback */
422 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
423 }
381 424
382static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars, 425 wb_data[0] = val;
426 wb_data[1] = 0;
427 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
428 wb_data, 2);
429}
430
431
432static u8 bnx2x_bmac1_enable(struct link_params *params,
433 struct link_vars *vars,
383 u8 is_lb) 434 u8 is_lb)
384{ 435{
385 struct bnx2x *bp = params->bp; 436 struct bnx2x *bp = params->bp;
@@ -389,17 +440,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
389 u32 wb_data[2]; 440 u32 wb_data[2];
390 u32 val; 441 u32 val;
391 442
392 DP(NETIF_MSG_LINK, "Enabling BigMAC\n"); 443 DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
393 /* reset and unreset the BigMac */
394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
395 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
396 msleep(1);
397
398 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
399 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
400
401 /* enable access for bmac registers */
402 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
403 444
404 /* XGXS control */ 445 /* XGXS control */
405 wb_data[0] = 0x3c; 446 wb_data[0] = 0x3c;
@@ -479,6 +520,103 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
479 wb_data, 2); 520 wb_data, 2);
480 } 521 }
481 522
523
524 return 0;
525}
526
527static u8 bnx2x_bmac2_enable(struct link_params *params,
528 struct link_vars *vars,
529 u8 is_lb)
530{
531 struct bnx2x *bp = params->bp;
532 u8 port = params->port;
533 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
534 NIG_REG_INGRESS_BMAC0_MEM;
535 u32 wb_data[2];
536
537 DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
538
539 wb_data[0] = 0;
540 wb_data[1] = 0;
541 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
542 wb_data, 2);
543 udelay(30);
544
545 /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
546 wb_data[0] = 0x3c;
547 wb_data[1] = 0;
548 REG_WR_DMAE(bp, bmac_addr +
549 BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
550 wb_data, 2);
551
552 udelay(30);
553
554 /* tx MAC SA */
555 wb_data[0] = ((params->mac_addr[2] << 24) |
556 (params->mac_addr[3] << 16) |
557 (params->mac_addr[4] << 8) |
558 params->mac_addr[5]);
559 wb_data[1] = ((params->mac_addr[0] << 8) |
560 params->mac_addr[1]);
561 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
562 wb_data, 2);
563
564 udelay(30);
565
566 /* Configure SAFC */
567 wb_data[0] = 0x1000200;
568 wb_data[1] = 0;
569 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
570 wb_data, 2);
571 udelay(30);
572
573 /* set rx mtu */
574 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
575 wb_data[1] = 0;
576 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
577 wb_data, 2);
578 udelay(30);
579
580 /* set tx mtu */
581 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
582 wb_data[1] = 0;
583 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
584 wb_data, 2);
585 udelay(30);
586 /* set cnt max size */
587 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
588 wb_data[1] = 0;
589 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
590 wb_data, 2);
591 udelay(30);
592 bnx2x_update_bmac2(params, vars, is_lb);
593
594 return 0;
595}
596
597u8 bnx2x_bmac_enable(struct link_params *params,
598 struct link_vars *vars,
599 u8 is_lb)
600{
601 u8 rc, port = params->port;
602 struct bnx2x *bp = params->bp;
603 u32 val;
604 /* reset and unreset the BigMac */
605 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
606 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
607 udelay(10);
608
609 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
610 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
611
612 /* enable access for bmac registers */
613 REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
614
615 /* Enable BMAC according to BMAC type*/
616 if (CHIP_IS_E2(bp))
617 rc = bnx2x_bmac2_enable(params, vars, is_lb);
618 else
619 rc = bnx2x_bmac1_enable(params, vars, is_lb);
482 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1); 620 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
483 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); 621 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
484 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); 622 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
@@ -493,7 +631,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
493 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1); 631 REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
494 632
495 vars->mac_type = MAC_TYPE_BMAC; 633 vars->mac_type = MAC_TYPE_BMAC;
496 return 0; 634 return rc;
497} 635}
498 636
499 637
@@ -519,13 +657,25 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
519 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && 657 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
520 nig_bmac_enable) { 658 nig_bmac_enable) {
521 659
522 /* Clear Rx Enable bit in BMAC_CONTROL register */ 660 if (CHIP_IS_E2(bp)) {
523 REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 661 /* Clear Rx Enable bit in BMAC_CONTROL register */
524 wb_data, 2); 662 REG_RD_DMAE(bp, bmac_addr +
525 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; 663 BIGMAC2_REGISTER_BMAC_CONTROL,
526 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, 664 wb_data, 2);
527 wb_data, 2); 665 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
528 666 REG_WR_DMAE(bp, bmac_addr +
667 BIGMAC2_REGISTER_BMAC_CONTROL,
668 wb_data, 2);
669 } else {
670 /* Clear Rx Enable bit in BMAC_CONTROL register */
671 REG_RD_DMAE(bp, bmac_addr +
672 BIGMAC_REGISTER_BMAC_CONTROL,
673 wb_data, 2);
674 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
675 REG_WR_DMAE(bp, bmac_addr +
676 BIGMAC_REGISTER_BMAC_CONTROL,
677 wb_data, 2);
678 }
529 msleep(1); 679 msleep(1);
530 } 680 }
531} 681}
@@ -821,23 +971,31 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
821 return -EINVAL; 971 return -EINVAL;
822} 972}
823 973
824static void bnx2x_set_aer_mmd(struct link_params *params, 974static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
825 struct bnx2x_phy *phy) 975 struct bnx2x_phy *phy)
826{ 976{
827 struct bnx2x *bp = params->bp;
828 u32 ser_lane; 977 u32 ser_lane;
829 u16 offset; 978 u16 offset, aer_val;
830 979 struct bnx2x *bp = params->bp;
831 ser_lane = ((params->lane_config & 980 ser_lane = ((params->lane_config &
832 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> 981 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
833 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); 982 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
834 983
835 offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? 984 offset = phy->addr + ser_lane;
836 (phy->addr + ser_lane) : 0; 985 if (CHIP_IS_E2(bp))
837 986 aer_val = 0x2800 + offset - 1;
987 else
988 aer_val = 0x3800 + offset;
838 CL45_WR_OVER_CL22(bp, phy, 989 CL45_WR_OVER_CL22(bp, phy,
839 MDIO_REG_BANK_AER_BLOCK, 990 MDIO_REG_BANK_AER_BLOCK,
840 MDIO_AER_BLOCK_AER_REG, 0x3800 + offset); 991 MDIO_AER_BLOCK_AER_REG, aer_val);
992}
993static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
994 struct bnx2x_phy *phy)
995{
996 CL45_WR_OVER_CL22(bp, phy,
997 MDIO_REG_BANK_AER_BLOCK,
998 MDIO_AER_BLOCK_AER_REG, 0x3800);
841} 999}
842 1000
843/******************************************************************/ 1001/******************************************************************/
@@ -2046,12 +2204,12 @@ static u8 bnx2x_init_serdes(struct bnx2x_phy *phy,
2046 u8 rc; 2204 u8 rc;
2047 vars->phy_flags |= PHY_SGMII_FLAG; 2205 vars->phy_flags |= PHY_SGMII_FLAG;
2048 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 2206 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2049 bnx2x_set_aer_mmd(params, phy); 2207 bnx2x_set_aer_mmd_serdes(params->bp, phy);
2050 rc = bnx2x_reset_unicore(params, phy, 1); 2208 rc = bnx2x_reset_unicore(params, phy, 1);
2051 /* reset the SerDes and wait for reset bit return low */ 2209 /* reset the SerDes and wait for reset bit return low */
2052 if (rc != 0) 2210 if (rc != 0)
2053 return rc; 2211 return rc;
2054 bnx2x_set_aer_mmd(params, phy); 2212 bnx2x_set_aer_mmd_serdes(params->bp, phy);
2055 2213
2056 return rc; 2214 return rc;
2057} 2215}
@@ -2076,7 +2234,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2076 vars->phy_flags &= ~PHY_SGMII_FLAG; 2234 vars->phy_flags &= ~PHY_SGMII_FLAG;
2077 2235
2078 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); 2236 bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
2079 bnx2x_set_aer_mmd(params, phy); 2237 bnx2x_set_aer_mmd_xgxs(params, phy);
2080 bnx2x_set_master_ln(params, phy); 2238 bnx2x_set_master_ln(params, phy);
2081 2239
2082 rc = bnx2x_reset_unicore(params, phy, 0); 2240 rc = bnx2x_reset_unicore(params, phy, 0);
@@ -2084,7 +2242,7 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
2084 if (rc != 0) 2242 if (rc != 0)
2085 return rc; 2243 return rc;
2086 2244
2087 bnx2x_set_aer_mmd(params, phy); 2245 bnx2x_set_aer_mmd_xgxs(params, phy);
2088 2246
2089 /* setting the masterLn_def again after the reset */ 2247 /* setting the masterLn_def again after the reset */
2090 bnx2x_set_master_ln(params, phy); 2248 bnx2x_set_master_ln(params, phy);
@@ -2358,7 +2516,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
2358 0x6041); 2516 0x6041);
2359 msleep(200); 2517 msleep(200);
2360 /* set aer mmd back */ 2518 /* set aer mmd back */
2361 bnx2x_set_aer_mmd(params, phy); 2519 bnx2x_set_aer_mmd_xgxs(params, phy);
2362 2520
2363 /* and md_devad */ 2521 /* and md_devad */
2364 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 2522 REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
@@ -2721,7 +2879,10 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
2721 struct bnx2x *bp = params->bp; 2879 struct bnx2x *bp = params->bp;
2722 u8 gpio_port; 2880 u8 gpio_port;
2723 /* HW reset */ 2881 /* HW reset */
2724 gpio_port = params->port; 2882 if (CHIP_IS_E2(bp))
2883 gpio_port = BP_PATH(bp);
2884 else
2885 gpio_port = params->port;
2725 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2886 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2726 MISC_REGISTERS_GPIO_OUTPUT_LOW, 2887 MISC_REGISTERS_GPIO_OUTPUT_LOW,
2727 gpio_port); 2888 gpio_port);
@@ -2799,8 +2960,9 @@ static u8 bnx2x_update_link_up(struct link_params *params,
2799 } 2960 }
2800 2961
2801 /* PBF - link up */ 2962 /* PBF - link up */
2802 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 2963 if (!(CHIP_IS_E2(bp)))
2803 vars->line_speed); 2964 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
2965 vars->line_speed);
2804 2966
2805 /* disable drain */ 2967 /* disable drain */
2806 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 2968 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
@@ -3443,7 +3605,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
3443 u8 gpio_port; 3605 u8 gpio_port;
3444 DP(NETIF_MSG_LINK, "Init 8073\n"); 3606 DP(NETIF_MSG_LINK, "Init 8073\n");
3445 3607
3446 gpio_port = params->port; 3608 if (CHIP_IS_E2(bp))
3609 gpio_port = BP_PATH(bp);
3610 else
3611 gpio_port = params->port;
3447 /* Restore normal power mode*/ 3612 /* Restore normal power mode*/
3448 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3613 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3449 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); 3614 MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
@@ -3680,7 +3845,10 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
3680{ 3845{
3681 struct bnx2x *bp = params->bp; 3846 struct bnx2x *bp = params->bp;
3682 u8 gpio_port; 3847 u8 gpio_port;
3683 gpio_port = params->port; 3848 if (CHIP_IS_E2(bp))
3849 gpio_port = BP_PATH(bp);
3850 else
3851 gpio_port = params->port;
3684 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", 3852 DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
3685 gpio_port); 3853 gpio_port);
3686 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 3854 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
@@ -4066,6 +4234,7 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
4066 "verification\n"); 4234 "verification\n");
4067 return -EINVAL; 4235 return -EINVAL;
4068 } 4236 }
4237
4069 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl); 4238 fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
4070 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param); 4239 fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
4071 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) { 4240 if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
@@ -6370,7 +6539,10 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
6370 phy->mdio_ctrl = bnx2x_get_emac_base(bp, 6539 phy->mdio_ctrl = bnx2x_get_emac_base(bp,
6371 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH, 6540 SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
6372 port); 6541 port);
6373 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR; 6542 if (CHIP_IS_E2(bp))
6543 phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
6544 else
6545 phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
6374 6546
6375 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n", 6547 DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
6376 port, phy->addr, phy->mdio_ctrl); 6548 port, phy->addr, phy->mdio_ctrl);
@@ -6741,7 +6913,9 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6741 } 6913 }
6742 6914
6743 bnx2x_emac_enable(params, vars, 0); 6915 bnx2x_emac_enable(params, vars, 0);
6744 bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); 6916 if (!(CHIP_IS_E2(bp)))
6917 bnx2x_pbf_update(params, vars->flow_ctrl,
6918 vars->line_speed);
6745 /* disable drain */ 6919 /* disable drain */
6746 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 6920 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
6747 6921
@@ -6931,18 +7105,34 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6931/****************************************************************************/ 7105/****************************************************************************/
6932/* Common function */ 7106/* Common function */
6933/****************************************************************************/ 7107/****************************************************************************/
6934static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 phy_index) 7108static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7109 u32 shmem_base_path[],
7110 u32 shmem2_base_path[], u8 phy_index,
7111 u32 chip_id)
6935{ 7112{
6936 struct bnx2x_phy phy[PORT_MAX]; 7113 struct bnx2x_phy phy[PORT_MAX];
6937 struct bnx2x_phy *phy_blk[PORT_MAX]; 7114 struct bnx2x_phy *phy_blk[PORT_MAX];
6938 u16 val; 7115 u16 val;
6939 s8 port; 7116 s8 port;
7117 s8 port_of_path = 0;
6940 7118
6941 /* PART1 - Reset both phys */ 7119 /* PART1 - Reset both phys */
6942 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7120 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7121 u32 shmem_base, shmem2_base;
7122 /* In E2, same phy is using for port0 of the two paths */
7123 if (CHIP_IS_E2(bp)) {
7124 shmem_base = shmem_base_path[port];
7125 shmem2_base = shmem2_base_path[port];
7126 port_of_path = 0;
7127 } else {
7128 shmem_base = shmem_base_path[0];
7129 shmem2_base = shmem2_base_path[0];
7130 port_of_path = port;
7131 }
7132
6943 /* Extract the ext phy address for the port */ 7133 /* Extract the ext phy address for the port */
6944 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, 7134 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
6945 port, &phy[port]) != 7135 port_of_path, &phy[port]) !=
6946 0) { 7136 0) {
6947 DP(NETIF_MSG_LINK, "populate_phy failed\n"); 7137 DP(NETIF_MSG_LINK, "populate_phy failed\n");
6948 return -EINVAL; 7138 return -EINVAL;
@@ -6980,9 +7170,15 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem
6980 /* PART2 - Download firmware to both phys */ 7170 /* PART2 - Download firmware to both phys */
6981 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7171 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
6982 u16 fw_ver1; 7172 u16 fw_ver1;
7173 if (CHIP_IS_E2(bp))
7174 port_of_path = 0;
7175 else
7176 port_of_path = port;
6983 7177
7178 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7179 phy_blk[port]->addr);
6984 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7180 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
6985 port); 7181 port_of_path);
6986 7182
6987 bnx2x_cl45_read(bp, phy_blk[port], 7183 bnx2x_cl45_read(bp, phy_blk[port],
6988 MDIO_PMA_DEVAD, 7184 MDIO_PMA_DEVAD,
@@ -7038,9 +7234,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem
7038 } 7234 }
7039 return 0; 7235 return 0;
7040} 7236}
7041 7237static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7042static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base, 7238 u32 shmem_base_path[],
7043 u32 shmem2_base, u8 phy_index) 7239 u32 shmem2_base_path[], u8 phy_index,
7240 u32 chip_id)
7044{ 7241{
7045 u32 val; 7242 u32 val;
7046 s8 port; 7243 s8 port;
@@ -7055,6 +7252,16 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7055 bnx2x_ext_phy_hw_reset(bp, 1); 7252 bnx2x_ext_phy_hw_reset(bp, 1);
7056 msleep(5); 7253 msleep(5);
7057 for (port = 0; port < PORT_MAX; port++) { 7254 for (port = 0; port < PORT_MAX; port++) {
7255 u32 shmem_base, shmem2_base;
7256
7257 /* In E2, same phy is using for port0 of the two paths */
7258 if (CHIP_IS_E2(bp)) {
7259 shmem_base = shmem_base_path[port];
7260 shmem2_base = shmem2_base_path[port];
7261 } else {
7262 shmem_base = shmem_base_path[0];
7263 shmem2_base = shmem2_base_path[0];
7264 }
7058 /* Extract the ext phy address for the port */ 7265 /* Extract the ext phy address for the port */
7059 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, 7266 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7060 port, &phy) != 7267 port, &phy) !=
@@ -7076,14 +7283,16 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7076 7283
7077 return 0; 7284 return 0;
7078} 7285}
7079static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base, 7286static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
7080 u32 shmem2_base, u8 phy_index) 7287 u32 shmem_base_path[],
7288 u32 shmem2_base_path[], u8 phy_index,
7289 u32 chip_id)
7081{ 7290{
7082 s8 port; 7291 s8 port;
7083 u32 swap_val, swap_override; 7292 u32 swap_val, swap_override;
7084 struct bnx2x_phy phy[PORT_MAX]; 7293 struct bnx2x_phy phy[PORT_MAX];
7085 struct bnx2x_phy *phy_blk[PORT_MAX]; 7294 struct bnx2x_phy *phy_blk[PORT_MAX];
7086 DP(NETIF_MSG_LINK, "Executing BCM8727 common init\n"); 7295 s8 port_of_path;
7087 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); 7296 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7088 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); 7297 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7089 7298
@@ -7098,19 +7307,33 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7098 7307
7099 /* PART1 - Reset both phys */ 7308 /* PART1 - Reset both phys */
7100 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7309 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7310 u32 shmem_base, shmem2_base;
7311
7312 /* In E2, same phy is using for port0 of the two paths */
7313 if (CHIP_IS_E2(bp)) {
7314 shmem_base = shmem_base_path[port];
7315 shmem2_base = shmem2_base_path[port];
7316 port_of_path = 0;
7317 } else {
7318 shmem_base = shmem_base_path[0];
7319 shmem2_base = shmem2_base_path[0];
7320 port_of_path = port;
7321 }
7322
7101 /* Extract the ext phy address for the port */ 7323 /* Extract the ext phy address for the port */
7102 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, 7324 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
7103 port, &phy[port]) != 7325 port_of_path, &phy[port]) !=
7104 0) { 7326 0) {
7105 DP(NETIF_MSG_LINK, "populate phy failed\n"); 7327 DP(NETIF_MSG_LINK, "populate phy failed\n");
7106 return -EINVAL; 7328 return -EINVAL;
7107 } 7329 }
7108 /* disable attentions */ 7330 /* disable attentions */
7109 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7331 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7110 (NIG_MASK_XGXS0_LINK_STATUS | 7332 port_of_path*4,
7111 NIG_MASK_XGXS0_LINK10G | 7333 (NIG_MASK_XGXS0_LINK_STATUS |
7112 NIG_MASK_SERDES0_LINK_STATUS | 7334 NIG_MASK_XGXS0_LINK10G |
7113 NIG_MASK_MI_INT)); 7335 NIG_MASK_SERDES0_LINK_STATUS |
7336 NIG_MASK_MI_INT));
7114 7337
7115 7338
7116 /* Reset the phy */ 7339 /* Reset the phy */
@@ -7132,9 +7355,14 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7132 /* PART2 - Download firmware to both phys */ 7355 /* PART2 - Download firmware to both phys */
7133 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7356 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7134 u16 fw_ver1; 7357 u16 fw_ver1;
7135 7358 if (CHIP_IS_E2(bp))
7359 port_of_path = 0;
7360 else
7361 port_of_path = port;
7362 DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
7363 phy_blk[port]->addr);
7136 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], 7364 bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
7137 port); 7365 port_of_path);
7138 bnx2x_cl45_read(bp, phy_blk[port], 7366 bnx2x_cl45_read(bp, phy_blk[port],
7139 MDIO_PMA_DEVAD, 7367 MDIO_PMA_DEVAD,
7140 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 7368 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
@@ -7150,29 +7378,32 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7150 return 0; 7378 return 0;
7151} 7379}
7152 7380
7153static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base, 7381static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
7154 u32 shmem2_base, u8 phy_index, 7382 u32 shmem2_base_path[], u8 phy_index,
7155 u32 ext_phy_type) 7383 u32 ext_phy_type, u32 chip_id)
7156{ 7384{
7157 u8 rc = 0; 7385 u8 rc = 0;
7158 7386
7159 switch (ext_phy_type) { 7387 switch (ext_phy_type) {
7160 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 7388 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7161 rc = bnx2x_8073_common_init_phy(bp, shmem_base, 7389 rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
7162 shmem2_base, phy_index); 7390 shmem2_base_path,
7391 phy_index, chip_id);
7163 break; 7392 break;
7164 7393
7165 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: 7394 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7166 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: 7395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
7167 rc = bnx2x_8727_common_init_phy(bp, shmem_base, 7396 rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
7168 shmem2_base, phy_index); 7397 shmem2_base_path,
7398 phy_index, chip_id);
7169 break; 7399 break;
7170 7400
7171 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 7401 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7172 /* GPIO1 affects both ports, so there's need to pull 7402 /* GPIO1 affects both ports, so there's need to pull
7173 it for single port alone */ 7403 it for single port alone */
7174 rc = bnx2x_8726_common_init_phy(bp, shmem_base, 7404 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
7175 shmem2_base, phy_index); 7405 shmem2_base_path,
7406 phy_index, chip_id);
7176 break; 7407 break;
7177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 7408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7178 rc = -EINVAL; 7409 rc = -EINVAL;
@@ -7187,8 +7418,8 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base,
7187 return rc; 7418 return rc;
7188} 7419}
7189 7420
7190u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, 7421u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
7191 u32 shmem2_base) 7422 u32 shmem2_base_path[], u32 chip_id)
7192{ 7423{
7193 u8 rc = 0; 7424 u8 rc = 0;
7194 u8 phy_index; 7425 u8 phy_index;
@@ -7202,12 +7433,13 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base,
7202 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS; 7433 for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
7203 phy_index++) { 7434 phy_index++) {
7204 ext_phy_config = bnx2x_get_ext_phy_config(bp, 7435 ext_phy_config = bnx2x_get_ext_phy_config(bp,
7205 shmem_base, 7436 shmem_base_path[0],
7206 phy_index, 0); 7437 phy_index, 0);
7207 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 7438 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
7208 rc |= bnx2x_ext_phy_common_init(bp, shmem_base, 7439 rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
7209 shmem2_base, 7440 shmem2_base_path,
7210 phy_index, ext_phy_type); 7441 phy_index, ext_phy_type,
7442 chip_id);
7211 } 7443 }
7212 return rc; 7444 return rc;
7213} 7445}
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index e98ea3d19471..58a4c7199276 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -22,7 +22,8 @@
22/***********************************************************/ 22/***********************************************************/
23/* Defines */ 23/* Defines */
24/***********************************************************/ 24/***********************************************************/
25#define DEFAULT_PHY_DEV_ADDR 3 25#define DEFAULT_PHY_DEV_ADDR 3
26#define E2_DEFAULT_PHY_DEV_ADDR 5
26 27
27 28
28 29
@@ -315,7 +316,8 @@ u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars,
315 u8 is_serdes); 316 u8 is_serdes);
316 317
317/* One-time initialization for external phy after power up */ 318/* One-time initialization for external phy after power up */
318u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base); 319u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
320 u32 shmem2_base_path[], u32 chip_id);
319 321
320/* Reset the external PHY using GPIO */ 322/* Reset the external PHY using GPIO */
321void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); 323void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 67587fe9e358..ff99a2fc0426 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -23,7 +23,6 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h> 26#include <linux/interrupt.h>
28#include <linux/pci.h> 27#include <linux/pci.h>
29#include <linux/init.h> 28#include <linux/init.h>
@@ -57,7 +56,6 @@
57#include "bnx2x_init_ops.h" 56#include "bnx2x_init_ops.h"
58#include "bnx2x_cmn.h" 57#include "bnx2x_cmn.h"
59 58
60
61#include <linux/firmware.h> 59#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h" 60#include "bnx2x_fw_file_hdr.h"
63/* FW files */ 61/* FW files */
@@ -66,8 +64,9 @@
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \ 64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \ 65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION) 66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
69#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw" 67#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw" 68#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
69#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
71 70
72/* Time in jiffies before concluding the transmitter is hung */ 71/* Time in jiffies before concluding the transmitter is hung */
73#define TX_TIMEOUT (5*HZ) 72#define TX_TIMEOUT (5*HZ)
@@ -77,18 +76,20 @@ static char version[] __devinitdata =
77 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 76 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
78 77
79MODULE_AUTHOR("Eliezer Tamir"); 78MODULE_AUTHOR("Eliezer Tamir");
80MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 79MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
81MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION); 82MODULE_VERSION(DRV_MODULE_VERSION);
83MODULE_FIRMWARE(FW_FILE_NAME_E1); 83MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H); 84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
85MODULE_FIRMWARE(FW_FILE_NAME_E2);
85 86
86static int multi_mode = 1; 87static int multi_mode = 1;
87module_param(multi_mode, int, 0); 88module_param(multi_mode, int, 0);
88MODULE_PARM_DESC(multi_mode, " Multi queue mode " 89MODULE_PARM_DESC(multi_mode, " Multi queue mode "
89 "(0 Disable; 1 Enable (default))"); 90 "(0 Disable; 1 Enable (default))");
90 91
91static int num_queues; 92int num_queues;
92module_param(num_queues, int, 0); 93module_param(num_queues, int, 0);
93MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" 94MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
94 " (default is as a number of CPUs)"); 95 " (default is as a number of CPUs)");
@@ -124,6 +125,8 @@ enum bnx2x_board_type {
124 BCM57710 = 0, 125 BCM57710 = 0,
125 BCM57711 = 1, 126 BCM57711 = 1,
126 BCM57711E = 2, 127 BCM57711E = 2,
128 BCM57712 = 3,
129 BCM57712E = 4
127}; 130};
128 131
129/* indexed by board_type, above */ 132/* indexed by board_type, above */
@@ -132,14 +135,24 @@ static struct {
132} board_info[] __devinitdata = { 135} board_info[] __devinitdata = {
133 { "Broadcom NetXtreme II BCM57710 XGb" }, 136 { "Broadcom NetXtreme II BCM57710 XGb" },
134 { "Broadcom NetXtreme II BCM57711 XGb" }, 137 { "Broadcom NetXtreme II BCM57711 XGb" },
135 { "Broadcom NetXtreme II BCM57711E XGb" } 138 { "Broadcom NetXtreme II BCM57711E XGb" },
139 { "Broadcom NetXtreme II BCM57712 XGb" },
140 { "Broadcom NetXtreme II BCM57712E XGb" }
136}; 141};
137 142
143#ifndef PCI_DEVICE_ID_NX2_57712
144#define PCI_DEVICE_ID_NX2_57712 0x1662
145#endif
146#ifndef PCI_DEVICE_ID_NX2_57712E
147#define PCI_DEVICE_ID_NX2_57712E 0x1663
148#endif
138 149
139static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 150static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
140 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
141 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
142 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, 153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
143 { 0 } 156 { 0 }
144}; 157};
145 158
@@ -149,6 +162,244 @@ MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
149* General service functions 162* General service functions
150****************************************************************************/ 163****************************************************************************/
151 164
165static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
167{
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
170}
171
172static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
174{
175 int i;
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
178}
179
180static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
182{
183 size_t size = sizeof(struct ustorm_per_client_stats);
184
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
187
188 __storm_memset_fill(bp, addr, size, 0);
189}
190
191static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
193{
194 size_t size = sizeof(struct tstorm_per_client_stats);
195
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
198
199 __storm_memset_fill(bp, addr, size, 0);
200}
201
202static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
204{
205 size_t size = sizeof(struct xstorm_per_client_stats);
206
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
209
210 __storm_memset_fill(bp, addr, size, 0);
211}
212
213
214static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
216{
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
219
220 __storm_memset_dma_mapping(bp, addr, mapping);
221}
222
223static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
224{
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226}
227
228static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
230 u16 abs_fid)
231{
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
233
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
236
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238}
239
240static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
242 u16 abs_fid)
243{
244 size_t size = sizeof(struct stats_indication_flags);
245
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
247
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249}
250
251static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
253 u16 abs_fid)
254{
255 size_t size = sizeof(struct stats_indication_flags);
256
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
258
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260}
261
262static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
264 u16 abs_fid)
265{
266 size_t size = sizeof(struct stats_indication_flags);
267
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
269
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271}
272
273static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
275 u16 abs_fid)
276{
277 size_t size = sizeof(struct stats_indication_flags);
278
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
280
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282}
283
284static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
286{
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
289
290 __storm_memset_dma_mapping(bp, addr, mapping);
291}
292
293static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
295{
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
298
299 __storm_memset_dma_mapping(bp, addr, mapping);
300}
301
302static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
304{
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
307
308 __storm_memset_dma_mapping(bp, addr, mapping);
309}
310
311static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
313{
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
316
317 __storm_memset_dma_mapping(bp, addr, mapping);
318}
319
320static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
321 u16 pf_id)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331}
332
333static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
334 u8 enable)
335{
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344}
345
346static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
348 u16 pfid)
349{
350 size_t size = sizeof(struct event_ring_data);
351
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
353
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
355}
356
357static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
358 u16 pfid)
359{
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
362}
363
364static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
365 u16 fw_sb_id, u8 sb_index,
366 u8 ticks)
367{
368
369 int index_offset = CHIP_IS_E2(bp) ?
370 offsetof(struct hc_status_block_data_e2, index_data) :
371 offsetof(struct hc_status_block_data_e1x, index_data);
372 u32 addr = BAR_CSTRORM_INTMEM +
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
374 index_offset +
375 sizeof(struct hc_index_data)*sb_index +
376 offsetof(struct hc_index_data, timeout);
377 REG_WR8(bp, addr, ticks);
378 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port, fw_sb_id, sb_index, ticks);
380}
381static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
382 u16 fw_sb_id, u8 sb_index,
383 u8 disable)
384{
385 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
386 int index_offset = CHIP_IS_E2(bp) ?
387 offsetof(struct hc_status_block_data_e2, index_data) :
388 offsetof(struct hc_status_block_data_e1x, index_data);
389 u32 addr = BAR_CSTRORM_INTMEM +
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
391 index_offset +
392 sizeof(struct hc_index_data)*sb_index +
393 offsetof(struct hc_index_data, flags);
394 u16 flags = REG_RD16(bp, addr);
395 /* clear and set */
396 flags &= ~HC_INDEX_DATA_HC_ENABLED;
397 flags |= enable_flag;
398 REG_WR16(bp, addr, flags);
399 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port, fw_sb_id, sb_index, disable);
401}
402
152/* used only at init 403/* used only at init
153 * locking is done by mcp 404 * locking is done by mcp
154 */ 405 */
@@ -172,6 +423,75 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
172 return val; 423 return val;
173} 424}
174 425
426#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430#define DMAE_DP_DST_NONE "dst_addr [none]"
431
432void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
433{
434 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
435
436 switch (dmae->opcode & DMAE_COMMAND_DST) {
437 case DMAE_CMD_DST_PCI:
438 if (src_type == DMAE_CMD_SRC_PCI)
439 DP(msglvl, "DMAE: opcode 0x%08x\n"
440 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
442 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
443 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
444 dmae->comp_addr_hi, dmae->comp_addr_lo,
445 dmae->comp_val);
446 else
447 DP(msglvl, "DMAE: opcode 0x%08x\n"
448 "src [%08x], len [%d*4], dst [%x:%08x]\n"
449 "comp_addr [%x:%08x], comp_val 0x%08x\n",
450 dmae->opcode, dmae->src_addr_lo >> 2,
451 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
452 dmae->comp_addr_hi, dmae->comp_addr_lo,
453 dmae->comp_val);
454 break;
455 case DMAE_CMD_DST_GRC:
456 if (src_type == DMAE_CMD_SRC_PCI)
457 DP(msglvl, "DMAE: opcode 0x%08x\n"
458 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
459 "comp_addr [%x:%08x], comp_val 0x%08x\n",
460 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
461 dmae->len, dmae->dst_addr_lo >> 2,
462 dmae->comp_addr_hi, dmae->comp_addr_lo,
463 dmae->comp_val);
464 else
465 DP(msglvl, "DMAE: opcode 0x%08x\n"
466 "src [%08x], len [%d*4], dst [%08x]\n"
467 "comp_addr [%x:%08x], comp_val 0x%08x\n",
468 dmae->opcode, dmae->src_addr_lo >> 2,
469 dmae->len, dmae->dst_addr_lo >> 2,
470 dmae->comp_addr_hi, dmae->comp_addr_lo,
471 dmae->comp_val);
472 break;
473 default:
474 if (src_type == DMAE_CMD_SRC_PCI)
475 DP(msglvl, "DMAE: opcode 0x%08x\n"
476 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
477 "dst_addr [none]\n"
478 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
479 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
480 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
481 dmae->comp_val);
482 else
483 DP(msglvl, "DMAE: opcode 0x%08x\n"
484 DP_LEVEL "src_addr [%08x] len [%d * 4] "
485 "dst_addr [none]\n"
486 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
487 dmae->opcode, dmae->src_addr_lo >> 2,
488 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
489 dmae->comp_val);
490 break;
491 }
492
493}
494
175const u32 dmae_reg_go_c[] = { 495const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, 496 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, 497 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
@@ -195,85 +515,137 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
195 REG_WR(bp, dmae_reg_go_c[idx], 1); 515 REG_WR(bp, dmae_reg_go_c[idx], 1);
196} 516}
197 517
198void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 518u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
199 u32 len32)
200{ 519{
201 struct dmae_command dmae; 520 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
202 u32 *wb_comp = bnx2x_sp(bp, wb_comp); 521 DMAE_CMD_C_ENABLE);
203 int cnt = 200; 522}
204 523
205 if (!bp->dmae_ready) { 524u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
206 u32 *data = bnx2x_sp(bp, wb_data[0]); 525{
526 return opcode & ~DMAE_CMD_SRC_RESET;
527}
207 528
208 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)" 529u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
209 " using indirect\n", dst_addr, len32); 530 bool with_comp, u8 comp_type)
210 bnx2x_init_ind_wr(bp, dst_addr, data, len32); 531{
211 return; 532 u32 opcode = 0;
212 } 533
534 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
535 (dst_type << DMAE_COMMAND_DST_SHIFT));
213 536
214 memset(&dmae, 0, sizeof(struct dmae_command)); 537 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
538
539 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
540 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
541 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
542 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
215 543
216 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
217 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
218 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
219#ifdef __BIG_ENDIAN 544#ifdef __BIG_ENDIAN
220 DMAE_CMD_ENDIANITY_B_DW_SWAP | 545 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
221#else 546#else
222 DMAE_CMD_ENDIANITY_DW_SWAP | 547 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
223#endif 548#endif
224 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) | 549 if (with_comp)
225 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT)); 550 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
226 dmae.src_addr_lo = U64_LO(dma_addr); 551 return opcode;
227 dmae.src_addr_hi = U64_HI(dma_addr); 552}
228 dmae.dst_addr_lo = dst_addr >> 2; 553
229 dmae.dst_addr_hi = 0; 554void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
230 dmae.len = len32; 555 u8 src_type, u8 dst_type)
231 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp)); 556{
232 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp)); 557 memset(dmae, 0, sizeof(struct dmae_command));
233 dmae.comp_val = DMAE_COMP_VAL; 558
234 559 /* set the opcode */
235 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n" 560 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
236 DP_LEVEL "src_addr [%x:%08x] len [%d *4] " 561 true, DMAE_COMP_PCI);
237 "dst_addr [%x:%08x (%08x)]\n" 562
238 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n", 563 /* fill in the completion parameters */
239 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo, 564 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
240 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr, 565 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
241 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val); 566 dmae->comp_val = DMAE_COMP_VAL;
242 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", 567}
568
569/* issue a dmae command over the init-channel and wailt for completion */
570int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
571{
572 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
573 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
574 int rc = 0;
575
576 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
243 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], 577 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
244 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); 578 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
245 579
580 /* lock the dmae channel */
246 mutex_lock(&bp->dmae_mutex); 581 mutex_lock(&bp->dmae_mutex);
247 582
583 /* reset completion */
248 *wb_comp = 0; 584 *wb_comp = 0;
249 585
250 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp)); 586 /* post the command on the channel used for initializations */
587 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
251 588
589 /* wait for completion */
252 udelay(5); 590 udelay(5);
253 591 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
254 while (*wb_comp != DMAE_COMP_VAL) {
255 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 592 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
256 593
257 if (!cnt) { 594 if (!cnt) {
258 BNX2X_ERR("DMAE timeout!\n"); 595 BNX2X_ERR("DMAE timeout!\n");
259 break; 596 rc = DMAE_TIMEOUT;
597 goto unlock;
260 } 598 }
261 cnt--; 599 cnt--;
262 /* adjust delay for emulation/FPGA */ 600 udelay(50);
263 if (CHIP_REV_IS_SLOW(bp)) 601 }
264 msleep(100); 602 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
265 else 603 BNX2X_ERR("DMAE PCI error!\n");
266 udelay(5); 604 rc = DMAE_PCI_ERROR;
267 } 605 }
268 606
607 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
608 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
609 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
610
611unlock:
269 mutex_unlock(&bp->dmae_mutex); 612 mutex_unlock(&bp->dmae_mutex);
613 return rc;
614}
615
616void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
617 u32 len32)
618{
619 struct dmae_command dmae;
620
621 if (!bp->dmae_ready) {
622 u32 *data = bnx2x_sp(bp, wb_data[0]);
623
624 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
625 " using indirect\n", dst_addr, len32);
626 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
627 return;
628 }
629
630 /* set opcode and fixed command fields */
631 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
632
633 /* fill in addresses and len */
634 dmae.src_addr_lo = U64_LO(dma_addr);
635 dmae.src_addr_hi = U64_HI(dma_addr);
636 dmae.dst_addr_lo = dst_addr >> 2;
637 dmae.dst_addr_hi = 0;
638 dmae.len = len32;
639
640 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
641
642 /* issue the command and wait for completion */
643 bnx2x_issue_dmae_with_comp(bp, &dmae);
270} 644}
271 645
272void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) 646void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
273{ 647{
274 struct dmae_command dmae; 648 struct dmae_command dmae;
275 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
276 int cnt = 200;
277 649
278 if (!bp->dmae_ready) { 650 if (!bp->dmae_ready) {
279 u32 *data = bnx2x_sp(bp, wb_data[0]); 651 u32 *data = bnx2x_sp(bp, wb_data[0]);
@@ -286,62 +658,20 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
286 return; 658 return;
287 } 659 }
288 660
289 memset(&dmae, 0, sizeof(struct dmae_command)); 661 /* set opcode and fixed command fields */
662 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
290 663
291 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 664 /* fill in addresses and len */
292 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
293 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
294#ifdef __BIG_ENDIAN
295 DMAE_CMD_ENDIANITY_B_DW_SWAP |
296#else
297 DMAE_CMD_ENDIANITY_DW_SWAP |
298#endif
299 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
300 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
301 dmae.src_addr_lo = src_addr >> 2; 665 dmae.src_addr_lo = src_addr >> 2;
302 dmae.src_addr_hi = 0; 666 dmae.src_addr_hi = 0;
303 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data)); 667 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
304 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 668 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
305 dmae.len = len32; 669 dmae.len = len32;
306 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
307 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
308 dmae.comp_val = DMAE_COMP_VAL;
309
310 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
311 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
312 "dst_addr [%x:%08x (%08x)]\n"
313 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
314 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
315 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
316 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
317
318 mutex_lock(&bp->dmae_mutex);
319
320 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
321 *wb_comp = 0;
322 670
323 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp)); 671 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
324
325 udelay(5);
326
327 while (*wb_comp != DMAE_COMP_VAL) {
328
329 if (!cnt) {
330 BNX2X_ERR("DMAE timeout!\n");
331 break;
332 }
333 cnt--;
334 /* adjust delay for emulation/FPGA */
335 if (CHIP_REV_IS_SLOW(bp))
336 msleep(100);
337 else
338 udelay(5);
339 }
340 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
341 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
342 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
343 672
344 mutex_unlock(&bp->dmae_mutex); 673 /* issue the command and wait for completion */
674 bnx2x_issue_dmae_with_comp(bp, &dmae);
345} 675}
346 676
347void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 677void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
@@ -508,19 +838,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
508 u32 mark, offset; 838 u32 mark, offset;
509 __be32 data[9]; 839 __be32 data[9];
510 int word; 840 int word;
511 841 u32 trace_shmem_base;
512 if (BP_NOMCP(bp)) { 842 if (BP_NOMCP(bp)) {
513 BNX2X_ERR("NO MCP - can not dump\n"); 843 BNX2X_ERR("NO MCP - can not dump\n");
514 return; 844 return;
515 } 845 }
516 846
517 addr = bp->common.shmem_base - 0x0800 + 4; 847 if (BP_PATH(bp) == 0)
848 trace_shmem_base = bp->common.shmem_base;
849 else
850 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
851 addr = trace_shmem_base - 0x0800 + 4;
518 mark = REG_RD(bp, addr); 852 mark = REG_RD(bp, addr);
519 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000; 853 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
854 + ((mark + 0x3) & ~0x3) - 0x08000000;
520 pr_err("begin fw dump (mark 0x%x)\n", mark); 855 pr_err("begin fw dump (mark 0x%x)\n", mark);
521 856
522 pr_err(""); 857 pr_err("");
523 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) { 858 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
524 for (word = 0; word < 8; word++) 859 for (word = 0; word < 8; word++)
525 data[word] = htonl(REG_RD(bp, offset + 4*word)); 860 data[word] = htonl(REG_RD(bp, offset + 4*word));
526 data[8] = 0x0; 861 data[8] = 0x0;
@@ -538,7 +873,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
538void bnx2x_panic_dump(struct bnx2x *bp) 873void bnx2x_panic_dump(struct bnx2x *bp)
539{ 874{
540 int i; 875 int i;
541 u16 j, start, end; 876 u16 j;
877 struct hc_sp_status_block_data sp_sb_data;
878 int func = BP_FUNC(bp);
879#ifdef BNX2X_STOP_ON_ERROR
880 u16 start = 0, end = 0;
881#endif
542 882
543 bp->stats_state = STATS_STATE_DISABLED; 883 bp->stats_state = STATS_STATE_DISABLED;
544 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 884 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
@@ -547,44 +887,143 @@ void bnx2x_panic_dump(struct bnx2x *bp)
547 887
548 /* Indices */ 888 /* Indices */
549 /* Common */ 889 /* Common */
550 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)" 890 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
551 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
552 " spq_prod_idx(0x%x)\n", 891 " spq_prod_idx(0x%x)\n",
553 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, 892 bp->def_idx, bp->def_att_idx,
554 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 893 bp->attn_state, bp->spq_prod_idx);
894 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
895 bp->def_status_blk->atten_status_block.attn_bits,
896 bp->def_status_blk->atten_status_block.attn_bits_ack,
897 bp->def_status_blk->atten_status_block.status_block_id,
898 bp->def_status_blk->atten_status_block.attn_bits_index);
899 BNX2X_ERR(" def (");
900 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
901 pr_cont("0x%x%s",
902 bp->def_status_blk->sp_sb.index_values[i],
903 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
904
905 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
906 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
907 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
908 i*sizeof(u32));
909
910 pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
911 "pf_id(0x%x) vnic_id(0x%x) "
912 "vf_id(0x%x) vf_valid (0x%x)\n",
913 sp_sb_data.igu_sb_id,
914 sp_sb_data.igu_seg_id,
915 sp_sb_data.p_func.pf_id,
916 sp_sb_data.p_func.vnic_id,
917 sp_sb_data.p_func.vf_id,
918 sp_sb_data.p_func.vf_valid);
919
555 920
556 /* Rx */
557 for_each_queue(bp, i) { 921 for_each_queue(bp, i) {
558 struct bnx2x_fastpath *fp = &bp->fp[i]; 922 struct bnx2x_fastpath *fp = &bp->fp[i];
559 923 int loop;
924 struct hc_status_block_data_e2 sb_data_e2;
925 struct hc_status_block_data_e1x sb_data_e1x;
926 struct hc_status_block_sm *hc_sm_p =
927 CHIP_IS_E2(bp) ?
928 sb_data_e2.common.state_machine :
929 sb_data_e1x.common.state_machine;
930 struct hc_index_data *hc_index_p =
931 CHIP_IS_E2(bp) ?
932 sb_data_e2.index_data :
933 sb_data_e1x.index_data;
934 int data_size;
935 u32 *sb_data_p;
936
937 /* Rx */
560 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" 938 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
561 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)" 939 " rx_comp_prod(0x%x)"
562 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", 940 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
563 i, fp->rx_bd_prod, fp->rx_bd_cons, 941 i, fp->rx_bd_prod, fp->rx_bd_cons,
564 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod, 942 fp->rx_comp_prod,
565 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); 943 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
566 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)" 944 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
567 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n", 945 " fp_hc_idx(0x%x)\n",
568 fp->rx_sge_prod, fp->last_max_sge, 946 fp->rx_sge_prod, fp->last_max_sge,
569 le16_to_cpu(fp->fp_u_idx), 947 le16_to_cpu(fp->fp_hc_idx));
570 fp->status_blk->u_status_block.status_block_index);
571 }
572
573 /* Tx */
574 for_each_queue(bp, i) {
575 struct bnx2x_fastpath *fp = &bp->fp[i];
576 948
949 /* Tx */
577 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" 950 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
578 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" 951 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
579 " *tx_cons_sb(0x%x)\n", 952 " *tx_cons_sb(0x%x)\n",
580 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 953 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
581 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 954 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
582 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)" 955
583 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx), 956 loop = CHIP_IS_E2(bp) ?
584 fp->status_blk->c_status_block.status_block_index, 957 HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
585 fp->tx_db.data.prod); 958
959 /* host sb data */
960
961 BNX2X_ERR(" run indexes (");
962 for (j = 0; j < HC_SB_MAX_SM; j++)
963 pr_cont("0x%x%s",
964 fp->sb_running_index[j],
965 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
966
967 BNX2X_ERR(" indexes (");
968 for (j = 0; j < loop; j++)
969 pr_cont("0x%x%s",
970 fp->sb_index_values[j],
971 (j == loop - 1) ? ")" : " ");
972 /* fw sb data */
973 data_size = CHIP_IS_E2(bp) ?
974 sizeof(struct hc_status_block_data_e2) :
975 sizeof(struct hc_status_block_data_e1x);
976 data_size /= sizeof(u32);
977 sb_data_p = CHIP_IS_E2(bp) ?
978 (u32 *)&sb_data_e2 :
979 (u32 *)&sb_data_e1x;
980 /* copy sb data in here */
981 for (j = 0; j < data_size; j++)
982 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
983 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
984 j * sizeof(u32));
985
986 if (CHIP_IS_E2(bp)) {
987 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
988 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
989 sb_data_e2.common.p_func.pf_id,
990 sb_data_e2.common.p_func.vf_id,
991 sb_data_e2.common.p_func.vf_valid,
992 sb_data_e2.common.p_func.vnic_id,
993 sb_data_e2.common.same_igu_sb_1b);
994 } else {
995 pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
996 "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
997 sb_data_e1x.common.p_func.pf_id,
998 sb_data_e1x.common.p_func.vf_id,
999 sb_data_e1x.common.p_func.vf_valid,
1000 sb_data_e1x.common.p_func.vnic_id,
1001 sb_data_e1x.common.same_igu_sb_1b);
1002 }
1003
1004 /* SB_SMs data */
1005 for (j = 0; j < HC_SB_MAX_SM; j++) {
1006 pr_cont("SM[%d] __flags (0x%x) "
1007 "igu_sb_id (0x%x) igu_seg_id(0x%x) "
1008 "time_to_expire (0x%x) "
1009 "timer_value(0x%x)\n", j,
1010 hc_sm_p[j].__flags,
1011 hc_sm_p[j].igu_sb_id,
1012 hc_sm_p[j].igu_seg_id,
1013 hc_sm_p[j].time_to_expire,
1014 hc_sm_p[j].timer_value);
1015 }
1016
1017 /* Indecies data */
1018 for (j = 0; j < loop; j++) {
1019 pr_cont("INDEX[%d] flags (0x%x) "
1020 "timeout (0x%x)\n", j,
1021 hc_index_p[j].flags,
1022 hc_index_p[j].timeout);
1023 }
586 } 1024 }
587 1025
1026#ifdef BNX2X_STOP_ON_ERROR
588 /* Rings */ 1027 /* Rings */
589 /* Rx */ 1028 /* Rx */
590 for_each_queue(bp, i) { 1029 for_each_queue(bp, i) {
@@ -642,13 +1081,13 @@ void bnx2x_panic_dump(struct bnx2x *bp)
642 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); 1081 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
643 } 1082 }
644 } 1083 }
645 1084#endif
646 bnx2x_fw_dump(bp); 1085 bnx2x_fw_dump(bp);
647 bnx2x_mc_assert(bp); 1086 bnx2x_mc_assert(bp);
648 BNX2X_ERR("end crash dump -----------------\n"); 1087 BNX2X_ERR("end crash dump -----------------\n");
649} 1088}
650 1089
651void bnx2x_int_enable(struct bnx2x *bp) 1090static void bnx2x_hc_int_enable(struct bnx2x *bp)
652{ 1091{
653 int port = BP_PORT(bp); 1092 int port = BP_PORT(bp);
654 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1093 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -672,14 +1111,19 @@ void bnx2x_int_enable(struct bnx2x *bp)
672 HC_CONFIG_0_REG_INT_LINE_EN_0 | 1111 HC_CONFIG_0_REG_INT_LINE_EN_0 |
673 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1112 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
674 1113
675 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", 1114 if (!CHIP_IS_E1(bp)) {
676 val, port, addr); 1115 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
1116 val, port, addr);
677 1117
678 REG_WR(bp, addr, val); 1118 REG_WR(bp, addr, val);
679 1119
680 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 1120 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1121 }
681 } 1122 }
682 1123
1124 if (CHIP_IS_E1(bp))
1125 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1126
683 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", 1127 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
684 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx"))); 1128 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
685 1129
@@ -690,9 +1134,9 @@ void bnx2x_int_enable(struct bnx2x *bp)
690 mmiowb(); 1134 mmiowb();
691 barrier(); 1135 barrier();
692 1136
693 if (CHIP_IS_E1H(bp)) { 1137 if (!CHIP_IS_E1(bp)) {
694 /* init leading/trailing edge */ 1138 /* init leading/trailing edge */
695 if (IS_E1HMF(bp)) { 1139 if (IS_MF(bp)) {
696 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4))); 1140 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
697 if (bp->port.pmf) 1141 if (bp->port.pmf)
698 /* enable nig and gpio3 attention */ 1142 /* enable nig and gpio3 attention */
@@ -708,16 +1152,91 @@ void bnx2x_int_enable(struct bnx2x *bp)
708 mmiowb(); 1152 mmiowb();
709} 1153}
710 1154
711static void bnx2x_int_disable(struct bnx2x *bp) 1155static void bnx2x_igu_int_enable(struct bnx2x *bp)
1156{
1157 u32 val;
1158 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1159 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
1160
1161 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1162
1163 if (msix) {
1164 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1165 IGU_PF_CONF_SINGLE_ISR_EN);
1166 val |= (IGU_PF_CONF_FUNC_EN |
1167 IGU_PF_CONF_MSI_MSIX_EN |
1168 IGU_PF_CONF_ATTN_BIT_EN);
1169 } else if (msi) {
1170 val &= ~IGU_PF_CONF_INT_LINE_EN;
1171 val |= (IGU_PF_CONF_FUNC_EN |
1172 IGU_PF_CONF_MSI_MSIX_EN |
1173 IGU_PF_CONF_ATTN_BIT_EN |
1174 IGU_PF_CONF_SINGLE_ISR_EN);
1175 } else {
1176 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1177 val |= (IGU_PF_CONF_FUNC_EN |
1178 IGU_PF_CONF_INT_LINE_EN |
1179 IGU_PF_CONF_ATTN_BIT_EN |
1180 IGU_PF_CONF_SINGLE_ISR_EN);
1181 }
1182
1183 DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
1184 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1185
1186 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1187
1188 barrier();
1189
1190 /* init leading/trailing edge */
1191 if (IS_MF(bp)) {
1192 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
1193 if (bp->port.pmf)
1194 /* enable nig and gpio3 attention */
1195 val |= 0x1100;
1196 } else
1197 val = 0xffff;
1198
1199 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1200 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1201
1202 /* Make sure that interrupts are indeed enabled from here on */
1203 mmiowb();
1204}
1205
1206void bnx2x_int_enable(struct bnx2x *bp)
1207{
1208 if (bp->common.int_block == INT_BLOCK_HC)
1209 bnx2x_hc_int_enable(bp);
1210 else
1211 bnx2x_igu_int_enable(bp);
1212}
1213
1214static void bnx2x_hc_int_disable(struct bnx2x *bp)
712{ 1215{
713 int port = BP_PORT(bp); 1216 int port = BP_PORT(bp);
714 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1217 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
715 u32 val = REG_RD(bp, addr); 1218 u32 val = REG_RD(bp, addr);
716 1219
717 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1220 /*
718 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1221 * in E1 we must use only PCI configuration space to disable
719 HC_CONFIG_0_REG_INT_LINE_EN_0 | 1222 * MSI/MSIX capablility
720 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1223 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
1224 */
1225 if (CHIP_IS_E1(bp)) {
1226 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
1227 * Use mask register to prevent from HC sending interrupts
1228 * after we exit the function
1229 */
1230 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
1231
1232 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1233 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1234 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1235 } else
1236 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1237 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1238 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1239 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
721 1240
722 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n", 1241 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
723 val, port, addr); 1242 val, port, addr);
@@ -730,6 +1249,32 @@ static void bnx2x_int_disable(struct bnx2x *bp)
730 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1249 BNX2X_ERR("BUG! proper val not read from IGU!\n");
731} 1250}
732 1251
1252static void bnx2x_igu_int_disable(struct bnx2x *bp)
1253{
1254 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1255
1256 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1257 IGU_PF_CONF_INT_LINE_EN |
1258 IGU_PF_CONF_ATTN_BIT_EN);
1259
1260 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1261
1262 /* flush all outstanding writes */
1263 mmiowb();
1264
1265 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1266 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1267 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1268}
1269
1270void bnx2x_int_disable(struct bnx2x *bp)
1271{
1272 if (bp->common.int_block == INT_BLOCK_HC)
1273 bnx2x_hc_int_disable(bp);
1274 else
1275 bnx2x_igu_int_disable(bp);
1276}
1277
733void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw) 1278void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
734{ 1279{
735 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1280 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -800,7 +1345,6 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
800 return false; 1345 return false;
801} 1346}
802 1347
803
804#ifdef BCM_CNIC 1348#ifdef BCM_CNIC
805static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid); 1349static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
806#endif 1350#endif
@@ -817,76 +1361,35 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
817 fp->index, cid, command, bp->state, 1361 fp->index, cid, command, bp->state,
818 rr_cqe->ramrod_cqe.ramrod_type); 1362 rr_cqe->ramrod_cqe.ramrod_type);
819 1363
820 bp->spq_left++; 1364 switch (command | fp->state) {
821 1365 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
822 if (fp->index) { 1366 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
823 switch (command | fp->state) { 1367 fp->state = BNX2X_FP_STATE_OPEN;
824 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
825 BNX2X_FP_STATE_OPENING):
826 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
827 cid);
828 fp->state = BNX2X_FP_STATE_OPEN;
829 break;
830
831 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
832 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
833 cid);
834 fp->state = BNX2X_FP_STATE_HALTED;
835 break;
836
837 default:
838 BNX2X_ERR("unexpected MC reply (%d) "
839 "fp[%d] state is %x\n",
840 command, fp->index, fp->state);
841 break;
842 }
843 mb(); /* force bnx2x_wait_ramrod() to see the change */
844 return;
845 }
846
847 switch (command | bp->state) {
848 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
849 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
850 bp->state = BNX2X_STATE_OPEN;
851 break; 1368 break;
852 1369
853 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT): 1370 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
854 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n"); 1371 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
855 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
856 fp->state = BNX2X_FP_STATE_HALTED; 1372 fp->state = BNX2X_FP_STATE_HALTED;
857 break; 1373 break;
858 1374
859 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): 1375 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
860 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); 1376 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
861 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 1377 fp->state = BNX2X_FP_STATE_TERMINATED;
862 break;
863
864#ifdef BCM_CNIC
865 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
866 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
867 bnx2x_cnic_cfc_comp(bp, cid);
868 break;
869#endif
870
871 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
872 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
873 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
874 bp->set_mac_pending--;
875 smp_wmb();
876 break;
877
878 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
879 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
880 bp->set_mac_pending--;
881 smp_wmb();
882 break; 1378 break;
883 1379
884 default: 1380 default:
885 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n", 1381 BNX2X_ERR("unexpected MC reply (%d) "
886 command, bp->state); 1382 "fp[%d] state is %x\n",
1383 command, fp->index, fp->state);
887 break; 1384 break;
888 } 1385 }
889 mb(); /* force bnx2x_wait_ramrod() to see the change */ 1386
1387 smp_mb__before_atomic_inc();
1388 atomic_inc(&bp->spq_left);
1389 /* push the change in fp->state and towards the memory */
1390 smp_wmb();
1391
1392 return;
890} 1393}
891 1394
892irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) 1395irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
@@ -914,25 +1417,22 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
914 return IRQ_HANDLED; 1417 return IRQ_HANDLED;
915#endif 1418#endif
916 1419
917 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) { 1420 for_each_queue(bp, i) {
918 struct bnx2x_fastpath *fp = &bp->fp[i]; 1421 struct bnx2x_fastpath *fp = &bp->fp[i];
919 1422
920 mask = 0x2 << fp->sb_id; 1423 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
921 if (status & mask) { 1424 if (status & mask) {
922 /* Handle Rx and Tx according to SB id */ 1425 /* Handle Rx and Tx according to SB id */
923 prefetch(fp->rx_cons_sb); 1426 prefetch(fp->rx_cons_sb);
924 prefetch(&fp->status_blk->u_status_block.
925 status_block_index);
926 prefetch(fp->tx_cons_sb); 1427 prefetch(fp->tx_cons_sb);
927 prefetch(&fp->status_blk->c_status_block. 1428 prefetch(&fp->sb_running_index[SM_RX_ID]);
928 status_block_index);
929 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1429 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
930 status &= ~mask; 1430 status &= ~mask;
931 } 1431 }
932 } 1432 }
933 1433
934#ifdef BCM_CNIC 1434#ifdef BCM_CNIC
935 mask = 0x2 << CNIC_SB_ID(bp); 1435 mask = 0x2;
936 if (status & (mask | 0x1)) { 1436 if (status & (mask | 0x1)) {
937 struct cnic_ops *c_ops = NULL; 1437 struct cnic_ops *c_ops = NULL;
938 1438
@@ -1273,12 +1773,12 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
1273 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 1773 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1274 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 1774 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1275 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 1775 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1276 ADVERTISED_Pause); 1776 ADVERTISED_Pause);
1277 break; 1777 break;
1278 1778
1279 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 1779 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1280 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 1780 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
1281 ADVERTISED_Pause); 1781 ADVERTISED_Pause);
1282 break; 1782 break;
1283 1783
1284 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: 1784 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
@@ -1287,12 +1787,11 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
1287 1787
1288 default: 1788 default:
1289 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 1789 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
1290 ADVERTISED_Pause); 1790 ADVERTISED_Pause);
1291 break; 1791 break;
1292 } 1792 }
1293} 1793}
1294 1794
1295
1296u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 1795u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1297{ 1796{
1298 if (!BP_NOMCP(bp)) { 1797 if (!BP_NOMCP(bp)) {
@@ -1302,7 +1801,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
1302 /* Initialize link parameters structure variables */ 1801 /* Initialize link parameters structure variables */
1303 /* It is recommended to turn off RX FC for jumbo frames 1802 /* It is recommended to turn off RX FC for jumbo frames
1304 for better performance */ 1803 for better performance */
1305 if (bp->dev->mtu > 5000) 1804 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
1306 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; 1805 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1307 else 1806 else
1308 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 1807 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
@@ -1416,13 +1915,11 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
1416static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 1915static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1417{ 1916{
1418 int all_zero = 1; 1917 int all_zero = 1;
1419 int port = BP_PORT(bp);
1420 int vn; 1918 int vn;
1421 1919
1422 bp->vn_weight_sum = 0; 1920 bp->vn_weight_sum = 0;
1423 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 1921 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1424 int func = 2*vn + port; 1922 u32 vn_cfg = bp->mf_config[vn];
1425 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1426 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1923 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1427 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1924 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1428 1925
@@ -1450,11 +1947,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1450 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 1947 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1451} 1948}
1452 1949
1453static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) 1950static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1454{ 1951{
1455 struct rate_shaping_vars_per_vn m_rs_vn; 1952 struct rate_shaping_vars_per_vn m_rs_vn;
1456 struct fairness_vars_per_vn m_fair_vn; 1953 struct fairness_vars_per_vn m_fair_vn;
1457 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 1954 u32 vn_cfg = bp->mf_config[vn];
1955 int func = 2*vn + BP_PORT(bp);
1458 u16 vn_min_rate, vn_max_rate; 1956 u16 vn_min_rate, vn_max_rate;
1459 int i; 1957 int i;
1460 1958
@@ -1467,11 +1965,12 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1467 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1965 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1468 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1966 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1469 /* If min rate is zero - set it to 1 */ 1967 /* If min rate is zero - set it to 1 */
1470 if (!vn_min_rate) 1968 if (bp->vn_weight_sum && (vn_min_rate == 0))
1471 vn_min_rate = DEF_MIN_RATE; 1969 vn_min_rate = DEF_MIN_RATE;
1472 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1970 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1473 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1971 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1474 } 1972 }
1973
1475 DP(NETIF_MSG_IFUP, 1974 DP(NETIF_MSG_IFUP,
1476 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", 1975 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
1477 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 1976 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
@@ -1512,6 +2011,83 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1512 ((u32 *)(&m_fair_vn))[i]); 2011 ((u32 *)(&m_fair_vn))[i]);
1513} 2012}
1514 2013
2014static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2015{
2016 if (CHIP_REV_IS_SLOW(bp))
2017 return CMNG_FNS_NONE;
2018 if (IS_MF(bp))
2019 return CMNG_FNS_MINMAX;
2020
2021 return CMNG_FNS_NONE;
2022}
2023
2024static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2025{
2026 int vn;
2027
2028 if (BP_NOMCP(bp))
2029 return; /* what should be the default bvalue in this case */
2030
2031 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2032 int /*abs*/func = 2*vn + BP_PORT(bp);
2033 bp->mf_config[vn] =
2034 MF_CFG_RD(bp, func_mf_config[func].config);
2035 }
2036}
2037
2038static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2039{
2040
2041 if (cmng_type == CMNG_FNS_MINMAX) {
2042 int vn;
2043
2044 /* clear cmng_enables */
2045 bp->cmng.flags.cmng_enables = 0;
2046
2047 /* read mf conf from shmem */
2048 if (read_cfg)
2049 bnx2x_read_mf_cfg(bp);
2050
2051 /* Init rate shaping and fairness contexts */
2052 bnx2x_init_port_minmax(bp);
2053
2054 /* vn_weight_sum and enable fairness if not 0 */
2055 bnx2x_calc_vn_weight_sum(bp);
2056
2057 /* calculate and set min-max rate for each vn */
2058 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2059 bnx2x_init_vn_minmax(bp, vn);
2060
2061 /* always enable rate shaping and fairness */
2062 bp->cmng.flags.cmng_enables |=
2063 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2064 if (!bp->vn_weight_sum)
2065 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2066 " fairness will be disabled\n");
2067 return;
2068 }
2069
2070 /* rate shaping and fairness are disabled */
2071 DP(NETIF_MSG_IFUP,
2072 "rate shaping and fairness are disabled\n");
2073}
2074
2075static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2076{
2077 int port = BP_PORT(bp);
2078 int func;
2079 int vn;
2080
2081 /* Set the attention towards other drivers on the same port */
2082 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2083 if (vn == BP_E1HVN(bp))
2084 continue;
2085
2086 func = ((vn << 1) | port);
2087 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2088 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2089 }
2090}
1515 2091
1516/* This function is called upon link interrupt */ 2092/* This function is called upon link interrupt */
1517static void bnx2x_link_attn(struct bnx2x *bp) 2093static void bnx2x_link_attn(struct bnx2x *bp)
@@ -1525,7 +2101,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1525 if (bp->link_vars.link_up) { 2101 if (bp->link_vars.link_up) {
1526 2102
1527 /* dropless flow control */ 2103 /* dropless flow control */
1528 if (CHIP_IS_E1H(bp) && bp->dropless_fc) { 2104 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1529 int port = BP_PORT(bp); 2105 int port = BP_PORT(bp);
1530 u32 pause_enabled = 0; 2106 u32 pause_enabled = 0;
1531 2107
@@ -1553,37 +2129,19 @@ static void bnx2x_link_attn(struct bnx2x *bp)
1553 if (prev_link_status != bp->link_vars.link_status) 2129 if (prev_link_status != bp->link_vars.link_status)
1554 bnx2x_link_report(bp); 2130 bnx2x_link_report(bp);
1555 2131
1556 if (IS_E1HMF(bp)) { 2132 if (IS_MF(bp))
1557 int port = BP_PORT(bp); 2133 bnx2x_link_sync_notify(bp);
1558 int func;
1559 int vn;
1560
1561 /* Set the attention towards other drivers on the same port */
1562 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1563 if (vn == BP_E1HVN(bp))
1564 continue;
1565
1566 func = ((vn << 1) | port);
1567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1568 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1569 }
1570
1571 if (bp->link_vars.link_up) {
1572 int i;
1573 2134
1574 /* Init rate shaping and fairness contexts */ 2135 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
1575 bnx2x_init_port_minmax(bp); 2136 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
1576 2137
1577 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2138 if (cmng_fns != CMNG_FNS_NONE) {
1578 bnx2x_init_vn_minmax(bp, 2*vn + port); 2139 bnx2x_cmng_fns_init(bp, false, cmng_fns);
1579 2140 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1580 /* Store it to internal memory */ 2141 } else
1581 for (i = 0; 2142 /* rate shaping and fairness are disabled */
1582 i < sizeof(struct cmng_struct_per_port) / 4; i++) 2143 DP(NETIF_MSG_IFUP,
1583 REG_WR(bp, BAR_XSTRORM_INTMEM + 2144 "single function mode without fairness\n");
1584 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1585 ((u32 *)(&bp->cmng))[i]);
1586 }
1587 } 2145 }
1588} 2146}
1589 2147
@@ -1599,7 +2157,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
1599 else 2157 else
1600 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2158 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1601 2159
1602 bnx2x_calc_vn_weight_sum(bp); 2160 /* the link status update could be the result of a DCC event
2161 hence re-read the shmem mf configuration */
2162 bnx2x_read_mf_cfg(bp);
1603 2163
1604 /* indicate link status */ 2164 /* indicate link status */
1605 bnx2x_link_report(bp); 2165 bnx2x_link_report(bp);
@@ -1615,8 +2175,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1615 2175
1616 /* enable nig attention */ 2176 /* enable nig attention */
1617 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); 2177 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1618 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); 2178 if (bp->common.int_block == INT_BLOCK_HC) {
1619 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); 2179 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2180 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2181 } else if (CHIP_IS_E2(bp)) {
2182 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2183 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2184 }
1620 2185
1621 bnx2x_stats_handle(bp, STATS_EVENT_PMF); 2186 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1622} 2187}
@@ -1632,22 +2197,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
1632/* send the MCP a request, block until there is a reply */ 2197/* send the MCP a request, block until there is a reply */
1633u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) 2198u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1634{ 2199{
1635 int func = BP_FUNC(bp); 2200 int mb_idx = BP_FW_MB_IDX(bp);
1636 u32 seq = ++bp->fw_seq; 2201 u32 seq = ++bp->fw_seq;
1637 u32 rc = 0; 2202 u32 rc = 0;
1638 u32 cnt = 1; 2203 u32 cnt = 1;
1639 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2204 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1640 2205
1641 mutex_lock(&bp->fw_mb_mutex); 2206 mutex_lock(&bp->fw_mb_mutex);
1642 SHMEM_WR(bp, func_mb[func].drv_mb_param, param); 2207 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
1643 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 2208 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2209
1644 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 2210 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1645 2211
1646 do { 2212 do {
1647 /* let the FW do it's magic ... */ 2213 /* let the FW do it's magic ... */
1648 msleep(delay); 2214 msleep(delay);
1649 2215
1650 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 2216 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
1651 2217
1652 /* Give the FW up to 5 second (500*10ms) */ 2218 /* Give the FW up to 5 second (500*10ms) */
1653 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); 2219 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
@@ -1669,6 +2235,315 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1669 return rc; 2235 return rc;
1670} 2236}
1671 2237
2238/* must be called under rtnl_lock */
2239void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2240{
2241 u32 mask = (1 << cl_id);
2242
2243 /* initial seeting is BNX2X_ACCEPT_NONE */
2244 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2245 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2246 u8 unmatched_unicast = 0;
2247
2248 if (filters & BNX2X_PROMISCUOUS_MODE) {
2249 /* promiscious - accept all, drop none */
2250 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2251 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2252 }
2253 if (filters & BNX2X_ACCEPT_UNICAST) {
2254 /* accept matched ucast */
2255 drop_all_ucast = 0;
2256 }
2257 if (filters & BNX2X_ACCEPT_MULTICAST) {
2258 /* accept matched mcast */
2259 drop_all_mcast = 0;
2260 }
2261 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2262 /* accept all mcast */
2263 drop_all_ucast = 0;
2264 accp_all_ucast = 1;
2265 }
2266 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2267 /* accept all mcast */
2268 drop_all_mcast = 0;
2269 accp_all_mcast = 1;
2270 }
2271 if (filters & BNX2X_ACCEPT_BROADCAST) {
2272 /* accept (all) bcast */
2273 drop_all_bcast = 0;
2274 accp_all_bcast = 1;
2275 }
2276
2277 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2278 bp->mac_filters.ucast_drop_all | mask :
2279 bp->mac_filters.ucast_drop_all & ~mask;
2280
2281 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2282 bp->mac_filters.mcast_drop_all | mask :
2283 bp->mac_filters.mcast_drop_all & ~mask;
2284
2285 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2286 bp->mac_filters.bcast_drop_all | mask :
2287 bp->mac_filters.bcast_drop_all & ~mask;
2288
2289 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2290 bp->mac_filters.ucast_accept_all | mask :
2291 bp->mac_filters.ucast_accept_all & ~mask;
2292
2293 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2294 bp->mac_filters.mcast_accept_all | mask :
2295 bp->mac_filters.mcast_accept_all & ~mask;
2296
2297 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2298 bp->mac_filters.bcast_accept_all | mask :
2299 bp->mac_filters.bcast_accept_all & ~mask;
2300
2301 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2302 bp->mac_filters.unmatched_unicast | mask :
2303 bp->mac_filters.unmatched_unicast & ~mask;
2304}
2305
2306void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2307{
2308 struct tstorm_eth_function_common_config tcfg = {0};
2309 u16 rss_flgs;
2310
2311 /* tpa */
2312 if (p->func_flgs & FUNC_FLG_TPA)
2313 tcfg.config_flags |=
2314 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2315
2316 /* set rss flags */
2317 rss_flgs = (p->rss->mode <<
2318 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2319
2320 if (p->rss->cap & RSS_IPV4_CAP)
2321 rss_flgs |= RSS_IPV4_CAP_MASK;
2322 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324 if (p->rss->cap & RSS_IPV6_CAP)
2325 rss_flgs |= RSS_IPV6_CAP_MASK;
2326 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2328
2329 tcfg.config_flags |= rss_flgs;
2330 tcfg.rss_result_mask = p->rss->result_mask;
2331
2332 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2333
2334 /* Enable the function in the FW */
2335 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336 storm_memset_func_en(bp, p->func_id, 1);
2337
2338 /* statistics */
2339 if (p->func_flgs & FUNC_FLG_STATS) {
2340 struct stats_indication_flags stats_flags = {0};
2341 stats_flags.collect_eth = 1;
2342
2343 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2345
2346 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2348
2349 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2351
2352 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2354 }
2355
2356 /* spq */
2357 if (p->func_flgs & FUNC_FLG_SPQ) {
2358 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2361 }
2362}
2363
2364static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365 struct bnx2x_fastpath *fp)
2366{
2367 u16 flags = 0;
2368
2369 /* calculate queue flags */
2370 flags |= QUEUE_FLG_CACHE_ALIGN;
2371 flags |= QUEUE_FLG_HC;
2372 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
2373
2374 flags |= QUEUE_FLG_VLAN;
2375 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2376
2377 if (!fp->disable_tpa)
2378 flags |= QUEUE_FLG_TPA;
2379
2380 flags |= QUEUE_FLG_STATS;
2381
2382 return flags;
2383}
2384
2385static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2386 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2387 struct bnx2x_rxq_init_params *rxq_init)
2388{
2389 u16 max_sge = 0;
2390 u16 sge_sz = 0;
2391 u16 tpa_agg_size = 0;
2392
2393 /* calculate queue flags */
2394 u16 flags = bnx2x_get_cl_flags(bp, fp);
2395
2396 if (!fp->disable_tpa) {
2397 pause->sge_th_hi = 250;
2398 pause->sge_th_lo = 150;
2399 tpa_agg_size = min_t(u32,
2400 (min_t(u32, 8, MAX_SKB_FRAGS) *
2401 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2402 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2403 SGE_PAGE_SHIFT;
2404 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2405 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2406 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2407 0xffff);
2408 }
2409
2410 /* pause - not for e1 */
2411 if (!CHIP_IS_E1(bp)) {
2412 pause->bd_th_hi = 350;
2413 pause->bd_th_lo = 250;
2414 pause->rcq_th_hi = 350;
2415 pause->rcq_th_lo = 250;
2416 pause->sge_th_hi = 0;
2417 pause->sge_th_lo = 0;
2418 pause->pri_map = 1;
2419 }
2420
2421 /* rxq setup */
2422 rxq_init->flags = flags;
2423 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2424 rxq_init->dscr_map = fp->rx_desc_mapping;
2425 rxq_init->sge_map = fp->rx_sge_mapping;
2426 rxq_init->rcq_map = fp->rx_comp_mapping;
2427 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2428 rxq_init->mtu = bp->dev->mtu;
2429 rxq_init->buf_sz = bp->rx_buf_size;
2430 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2431 rxq_init->cl_id = fp->cl_id;
2432 rxq_init->spcl_id = fp->cl_id;
2433 rxq_init->stat_id = fp->cl_id;
2434 rxq_init->tpa_agg_sz = tpa_agg_size;
2435 rxq_init->sge_buf_sz = sge_sz;
2436 rxq_init->max_sges_pkt = max_sge;
2437 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2438 rxq_init->fw_sb_id = fp->fw_sb_id;
2439
2440 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2441
2442 rxq_init->cid = HW_CID(bp, fp->cid);
2443
2444 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2445}
2446
2447static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2448 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2449{
2450 u16 flags = bnx2x_get_cl_flags(bp, fp);
2451
2452 txq_init->flags = flags;
2453 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2454 txq_init->dscr_map = fp->tx_desc_mapping;
2455 txq_init->stat_id = fp->cl_id;
2456 txq_init->cid = HW_CID(bp, fp->cid);
2457 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2458 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2459 txq_init->fw_sb_id = fp->fw_sb_id;
2460 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2461}
2462
2463void bnx2x_pf_init(struct bnx2x *bp)
2464{
2465 struct bnx2x_func_init_params func_init = {0};
2466 struct bnx2x_rss_params rss = {0};
2467 struct event_ring_data eq_data = { {0} };
2468 u16 flags;
2469
2470 /* pf specific setups */
2471 if (!CHIP_IS_E1(bp))
2472 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
2473
2474 if (CHIP_IS_E2(bp)) {
2475 /* reset IGU PF statistics: MSIX + ATTN */
2476 /* PF */
2477 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2478 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2479 (CHIP_MODE_IS_4_PORT(bp) ?
2480 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2481 /* ATTN */
2482 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2483 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2484 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2485 (CHIP_MODE_IS_4_PORT(bp) ?
2486 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2487 }
2488
2489 /* function setup flags */
2490 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2491
2492 if (CHIP_IS_E1x(bp))
2493 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2494 else
2495 flags |= FUNC_FLG_TPA;
2496
2497 /* function setup */
2498
2499 /**
2500 * Although RSS is meaningless when there is a single HW queue we
2501 * still need it enabled in order to have HW Rx hash generated.
2502 */
2503 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2504 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2505 rss.mode = bp->multi_mode;
2506 rss.result_mask = MULTI_MASK;
2507 func_init.rss = &rss;
2508
2509 func_init.func_flgs = flags;
2510 func_init.pf_id = BP_FUNC(bp);
2511 func_init.func_id = BP_FUNC(bp);
2512 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2513 func_init.spq_map = bp->spq_mapping;
2514 func_init.spq_prod = bp->spq_prod_idx;
2515
2516 bnx2x_func_init(bp, &func_init);
2517
2518 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2519
2520 /*
2521 Congestion management values depend on the link rate
2522 There is no active link so initial link rate is set to 10 Gbps.
2523 When the link comes up The congestion management values are
2524 re-calculated according to the actual link rate.
2525 */
2526 bp->link_vars.line_speed = SPEED_10000;
2527 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2528
2529 /* Only the PMF sets the HW */
2530 if (bp->port.pmf)
2531 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2532
2533 /* no rx until link is up */
2534 bp->rx_mode = BNX2X_RX_MODE_NONE;
2535 bnx2x_set_storm_rx_mode(bp);
2536
2537 /* init Event Queue */
2538 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2539 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2540 eq_data.producer = bp->eq_prod;
2541 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2542 eq_data.sb_id = DEF_SB_ID;
2543 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2544}
2545
2546
1672static void bnx2x_e1h_disable(struct bnx2x *bp) 2547static void bnx2x_e1h_disable(struct bnx2x *bp)
1673{ 2548{
1674 int port = BP_PORT(bp); 2549 int port = BP_PORT(bp);
@@ -1695,40 +2570,6 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
1695 */ 2570 */
1696} 2571}
1697 2572
1698static void bnx2x_update_min_max(struct bnx2x *bp)
1699{
1700 int port = BP_PORT(bp);
1701 int vn, i;
1702
1703 /* Init rate shaping and fairness contexts */
1704 bnx2x_init_port_minmax(bp);
1705
1706 bnx2x_calc_vn_weight_sum(bp);
1707
1708 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1709 bnx2x_init_vn_minmax(bp, 2*vn + port);
1710
1711 if (bp->port.pmf) {
1712 int func;
1713
1714 /* Set the attention towards other drivers on the same port */
1715 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1716 if (vn == BP_E1HVN(bp))
1717 continue;
1718
1719 func = ((vn << 1) | port);
1720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1721 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1722 }
1723
1724 /* Store it to internal memory */
1725 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1726 REG_WR(bp, BAR_XSTRORM_INTMEM +
1727 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1728 ((u32 *)(&bp->cmng))[i]);
1729 }
1730}
1731
1732static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2573static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1733{ 2574{
1734 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2575 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -1740,7 +2581,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1740 * where the bp->flags can change so it is done without any 2581 * where the bp->flags can change so it is done without any
1741 * locks 2582 * locks
1742 */ 2583 */
1743 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 2584 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
1744 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); 2585 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1745 bp->flags |= MF_FUNC_DIS; 2586 bp->flags |= MF_FUNC_DIS;
1746 2587
@@ -1755,7 +2596,9 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1755 } 2596 }
1756 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 2597 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1757 2598
1758 bnx2x_update_min_max(bp); 2599 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2600 bnx2x_link_sync_notify(bp);
2601 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
1759 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 2602 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1760 } 2603 }
1761 2604
@@ -1790,16 +2633,17 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1790 /* Make sure that BD data is updated before writing the producer */ 2633 /* Make sure that BD data is updated before writing the producer */
1791 wmb(); 2634 wmb();
1792 2635
1793 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), 2636 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1794 bp->spq_prod_idx); 2637 bp->spq_prod_idx);
1795 mmiowb(); 2638 mmiowb();
1796} 2639}
1797 2640
1798/* the slow path queue is odd since completions arrive on the fastpath ring */ 2641/* the slow path queue is odd since completions arrive on the fastpath ring */
1799int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 2642int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1800 u32 data_hi, u32 data_lo, int common) 2643 u32 data_hi, u32 data_lo, int common)
1801{ 2644{
1802 struct eth_spe *spe; 2645 struct eth_spe *spe;
2646 u16 type;
1803 2647
1804#ifdef BNX2X_STOP_ON_ERROR 2648#ifdef BNX2X_STOP_ON_ERROR
1805 if (unlikely(bp->panic)) 2649 if (unlikely(bp->panic))
@@ -1808,7 +2652,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1808 2652
1809 spin_lock_bh(&bp->spq_lock); 2653 spin_lock_bh(&bp->spq_lock);
1810 2654
1811 if (!bp->spq_left) { 2655 if (!atomic_read(&bp->spq_left)) {
1812 BNX2X_ERR("BUG! SPQ ring full!\n"); 2656 BNX2X_ERR("BUG! SPQ ring full!\n");
1813 spin_unlock_bh(&bp->spq_lock); 2657 spin_unlock_bh(&bp->spq_lock);
1814 bnx2x_panic(); 2658 bnx2x_panic();
@@ -1821,22 +2665,42 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1821 spe->hdr.conn_and_cmd_data = 2665 spe->hdr.conn_and_cmd_data =
1822 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | 2666 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1823 HW_CID(bp, cid)); 2667 HW_CID(bp, cid));
1824 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); 2668
1825 if (common) 2669 if (common)
1826 spe->hdr.type |= 2670 /* Common ramrods:
1827 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); 2671 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2672 * TRAFFIC_STOP, TRAFFIC_START
2673 */
2674 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2675 & SPE_HDR_CONN_TYPE;
2676 else
2677 /* ETH ramrods: SETUP, HALT */
2678 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2679 & SPE_HDR_CONN_TYPE;
2680
2681 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2682 SPE_HDR_FUNCTION_ID);
1828 2683
1829 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi); 2684 spe->hdr.type = cpu_to_le16(type);
1830 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1831 2685
1832 bp->spq_left--; 2686 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2687 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2688
2689 /* stats ramrod has it's own slot on the spq */
2690 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2691 /* It's ok if the actual decrement is issued towards the memory
2692 * somewhere between the spin_lock and spin_unlock. Thus no
2693 * more explict memory barrier is needed.
2694 */
2695 atomic_dec(&bp->spq_left);
1833 2696
1834 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2697 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1835 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", 2698 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2699 "type(0x%x) left %x\n",
1836 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), 2700 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1837 (u32)(U64_LO(bp->spq_mapping) + 2701 (u32)(U64_LO(bp->spq_mapping) +
1838 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 2702 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1839 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); 2703 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
1840 2704
1841 bnx2x_sp_prod_update(bp); 2705 bnx2x_sp_prod_update(bp);
1842 spin_unlock_bh(&bp->spq_lock); 2706 spin_unlock_bh(&bp->spq_lock);
@@ -1873,32 +2737,27 @@ static void bnx2x_release_alr(struct bnx2x *bp)
1873 REG_WR(bp, GRCBASE_MCP + 0x9c, 0); 2737 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1874} 2738}
1875 2739
2740#define BNX2X_DEF_SB_ATT_IDX 0x0001
2741#define BNX2X_DEF_SB_IDX 0x0002
2742
1876static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 2743static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1877{ 2744{
1878 struct host_def_status_block *def_sb = bp->def_status_blk; 2745 struct host_sp_status_block *def_sb = bp->def_status_blk;
1879 u16 rc = 0; 2746 u16 rc = 0;
1880 2747
1881 barrier(); /* status block is written to by the chip */ 2748 barrier(); /* status block is written to by the chip */
1882 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 2749 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1883 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 2750 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1884 rc |= 1; 2751 rc |= BNX2X_DEF_SB_ATT_IDX;
1885 }
1886 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1887 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1888 rc |= 2;
1889 }
1890 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1891 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1892 rc |= 4;
1893 }
1894 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1895 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1896 rc |= 8;
1897 } 2752 }
1898 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) { 2753
1899 bp->def_t_idx = def_sb->t_def_status_block.status_block_index; 2754 if (bp->def_idx != def_sb->sp_sb.running_index) {
1900 rc |= 16; 2755 bp->def_idx = def_sb->sp_sb.running_index;
2756 rc |= BNX2X_DEF_SB_IDX;
1901 } 2757 }
2758
2759 /* Do not reorder: indecies reading should complete before handling */
2760 barrier();
1902 return rc; 2761 return rc;
1903} 2762}
1904 2763
@@ -1909,14 +2768,13 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1909static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 2768static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1910{ 2769{
1911 int port = BP_PORT(bp); 2770 int port = BP_PORT(bp);
1912 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1913 COMMAND_REG_ATTN_BITS_SET);
1914 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2771 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1915 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2772 MISC_REG_AEU_MASK_ATTN_FUNC_0;
1916 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2773 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1917 NIG_REG_MASK_INTERRUPT_PORT0; 2774 NIG_REG_MASK_INTERRUPT_PORT0;
1918 u32 aeu_mask; 2775 u32 aeu_mask;
1919 u32 nig_mask = 0; 2776 u32 nig_mask = 0;
2777 u32 reg_addr;
1920 2778
1921 if (bp->attn_state & asserted) 2779 if (bp->attn_state & asserted)
1922 BNX2X_ERR("IGU ERROR\n"); 2780 BNX2X_ERR("IGU ERROR\n");
@@ -1991,9 +2849,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1991 2849
1992 } /* if hardwired */ 2850 } /* if hardwired */
1993 2851
1994 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 2852 if (bp->common.int_block == INT_BLOCK_HC)
1995 asserted, hc_addr); 2853 reg_addr = (HC_REG_COMMAND_REG + port*32 +
1996 REG_WR(bp, hc_addr, asserted); 2854 COMMAND_REG_ATTN_BITS_SET);
2855 else
2856 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2857
2858 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2859 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2860 REG_WR(bp, reg_addr, asserted);
1997 2861
1998 /* now set back the mask */ 2862 /* now set back the mask */
1999 if (asserted & ATTN_NIG_FOR_FUNC) { 2863 if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2114,6 +2978,10 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2114 /* RQ_USDMDP_FIFO_OVERFLOW */ 2978 /* RQ_USDMDP_FIFO_OVERFLOW */
2115 if (val & 0x18000) 2979 if (val & 0x18000)
2116 BNX2X_ERR("FATAL error from PXP\n"); 2980 BNX2X_ERR("FATAL error from PXP\n");
2981 if (CHIP_IS_E2(bp)) {
2982 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2983 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2984 }
2117 } 2985 }
2118 2986
2119 if (attn & HW_INTERRUT_ASSERT_SET_2) { 2987 if (attn & HW_INTERRUT_ASSERT_SET_2) {
@@ -2144,9 +3012,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2144 int func = BP_FUNC(bp); 3012 int func = BP_FUNC(bp);
2145 3013
2146 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3014 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2147 bp->mf_config = SHMEM_RD(bp, 3015 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
2148 mf_cfg.func_mf_config[func].config); 3016 func_mf_config[BP_ABS_FUNC(bp)].config);
2149 val = SHMEM_RD(bp, func_mb[func].drv_status); 3017 val = SHMEM_RD(bp,
3018 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2150 if (val & DRV_STATUS_DCC_EVENT_MASK) 3019 if (val & DRV_STATUS_DCC_EVENT_MASK)
2151 bnx2x_dcc_event(bp, 3020 bnx2x_dcc_event(bp,
2152 (val & DRV_STATUS_DCC_EVENT_MASK)); 3021 (val & DRV_STATUS_DCC_EVENT_MASK));
@@ -2176,13 +3045,13 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2176 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { 3045 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2177 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn); 3046 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2178 if (attn & BNX2X_GRC_TIMEOUT) { 3047 if (attn & BNX2X_GRC_TIMEOUT) {
2179 val = CHIP_IS_E1H(bp) ? 3048 val = CHIP_IS_E1(bp) ? 0 :
2180 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0; 3049 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
2181 BNX2X_ERR("GRC time-out 0x%08x\n", val); 3050 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2182 } 3051 }
2183 if (attn & BNX2X_GRC_RSV) { 3052 if (attn & BNX2X_GRC_RSV) {
2184 val = CHIP_IS_E1H(bp) ? 3053 val = CHIP_IS_E1(bp) ? 0 :
2185 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0; 3054 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
2186 BNX2X_ERR("GRC reserved 0x%08x\n", val); 3055 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2187 } 3056 }
2188 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); 3057 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
@@ -2195,6 +3064,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2195#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3064#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
2196#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3065#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
2197#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) 3066#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3067
2198/* 3068/*
2199 * should be run under rtnl lock 3069 * should be run under rtnl lock
2200 */ 3070 */
@@ -2487,6 +3357,74 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2487 attn.sig[3]); 3357 attn.sig[3]);
2488} 3358}
2489 3359
3360
3361static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3362{
3363 u32 val;
3364 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3365
3366 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3367 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3368 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3369 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3370 "ADDRESS_ERROR\n");
3371 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "INCORRECT_RCV_BEHAVIOR\n");
3374 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3375 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3376 "WAS_ERROR_ATTN\n");
3377 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3378 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3379 "VF_LENGTH_VIOLATION_ATTN\n");
3380 if (val &
3381 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3384 if (val &
3385 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3386 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3387 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3388 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3389 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3390 "TCPL_ERROR_ATTN\n");
3391 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3393 "TCPL_IN_TWO_RCBS_ATTN\n");
3394 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3395 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3396 "CSSNOOP_FIFO_OVERFLOW\n");
3397 }
3398 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3399 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3400 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3401 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3402 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3403 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3405 "_ATC_TCPL_TO_NOT_PEND\n");
3406 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3407 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3408 "ATC_GPA_MULTIPLE_HITS\n");
3409 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3410 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3411 "ATC_RCPL_TO_EMPTY_CNT\n");
3412 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3413 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3414 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3415 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3416 "ATC_IREQ_LESS_THAN_STU\n");
3417 }
3418
3419 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3420 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3421 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3422 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3423 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3424 }
3425
3426}
3427
2490static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 3428static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2491{ 3429{
2492 struct attn_route attn, *group_mask; 3430 struct attn_route attn, *group_mask;
@@ -2517,17 +3455,28 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2517 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 3455 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2518 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 3456 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2519 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); 3457 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2520 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n", 3458 if (CHIP_IS_E2(bp))
2521 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]); 3459 attn.sig[4] =
3460 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3461 else
3462 attn.sig[4] = 0;
3463
3464 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3465 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
2522 3466
2523 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3467 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2524 if (deasserted & (1 << index)) { 3468 if (deasserted & (1 << index)) {
2525 group_mask = &bp->attn_group[index]; 3469 group_mask = &bp->attn_group[index];
2526 3470
2527 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", 3471 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
2528 index, group_mask->sig[0], group_mask->sig[1], 3472 "%08x %08x %08x\n",
2529 group_mask->sig[2], group_mask->sig[3]); 3473 index,
3474 group_mask->sig[0], group_mask->sig[1],
3475 group_mask->sig[2], group_mask->sig[3],
3476 group_mask->sig[4]);
2530 3477
3478 bnx2x_attn_int_deasserted4(bp,
3479 attn.sig[4] & group_mask->sig[4]);
2531 bnx2x_attn_int_deasserted3(bp, 3480 bnx2x_attn_int_deasserted3(bp,
2532 attn.sig[3] & group_mask->sig[3]); 3481 attn.sig[3] & group_mask->sig[3]);
2533 bnx2x_attn_int_deasserted1(bp, 3482 bnx2x_attn_int_deasserted1(bp,
@@ -2541,11 +3490,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2541 3490
2542 bnx2x_release_alr(bp); 3491 bnx2x_release_alr(bp);
2543 3492
2544 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); 3493 if (bp->common.int_block == INT_BLOCK_HC)
3494 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3495 COMMAND_REG_ATTN_BITS_CLR);
3496 else
3497 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
2545 3498
2546 val = ~deasserted; 3499 val = ~deasserted;
2547 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n", 3500 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
2548 val, reg_addr); 3501 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2549 REG_WR(bp, reg_addr, val); 3502 REG_WR(bp, reg_addr, val);
2550 3503
2551 if (~bp->attn_state & deasserted) 3504 if (~bp->attn_state & deasserted)
@@ -2598,6 +3551,141 @@ static void bnx2x_attn_int(struct bnx2x *bp)
2598 bnx2x_attn_int_deasserted(bp, deasserted); 3551 bnx2x_attn_int_deasserted(bp, deasserted);
2599} 3552}
2600 3553
3554static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3555{
3556 /* No memory barriers */
3557 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3558 mmiowb(); /* keep prod updates ordered */
3559}
3560
3561#ifdef BCM_CNIC
3562static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3563 union event_ring_elem *elem)
3564{
3565 if (!bp->cnic_eth_dev.starting_cid ||
3566 cid < bp->cnic_eth_dev.starting_cid)
3567 return 1;
3568
3569 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3570
3571 if (unlikely(elem->message.data.cfc_del_event.error)) {
3572 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3573 cid);
3574 bnx2x_panic_dump(bp);
3575 }
3576 bnx2x_cnic_cfc_comp(bp, cid);
3577 return 0;
3578}
3579#endif
3580
3581static void bnx2x_eq_int(struct bnx2x *bp)
3582{
3583 u16 hw_cons, sw_cons, sw_prod;
3584 union event_ring_elem *elem;
3585 u32 cid;
3586 u8 opcode;
3587 int spqe_cnt = 0;
3588
3589 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3590
3591 /* The hw_cos range is 1-255, 257 - the sw_cons range is 0-254, 256.
3592 * when we get the the next-page we nned to adjust so the loop
3593 * condition below will be met. The next element is the size of a
3594 * regular element and hence incrementing by 1
3595 */
3596 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3597 hw_cons++;
3598
3599 /* This function may never run in parralel with itself for a
3600 * specific bp, thus there is no need in "paired" read memory
3601 * barrier here.
3602 */
3603 sw_cons = bp->eq_cons;
3604 sw_prod = bp->eq_prod;
3605
3606 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
3607 hw_cons, sw_cons, atomic_read(&bp->spq_left));
3608
3609 for (; sw_cons != hw_cons;
3610 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3611
3612
3613 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3614
3615 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3616 opcode = elem->message.opcode;
3617
3618
3619 /* handle eq element */
3620 switch (opcode) {
3621 case EVENT_RING_OPCODE_STAT_QUERY:
3622 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3623 /* nothing to do with stats comp */
3624 continue;
3625
3626 case EVENT_RING_OPCODE_CFC_DEL:
3627 /* handle according to cid range */
3628 /*
3629 * we may want to verify here that the bp state is
3630 * HALTING
3631 */
3632 DP(NETIF_MSG_IFDOWN,
3633 "got delete ramrod for MULTI[%d]\n", cid);
3634#ifdef BCM_CNIC
3635 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3636 goto next_spqe;
3637#endif
3638 bnx2x_fp(bp, cid, state) =
3639 BNX2X_FP_STATE_CLOSED;
3640
3641 goto next_spqe;
3642 }
3643
3644 switch (opcode | bp->state) {
3645 case (EVENT_RING_OPCODE_FUNCTION_START |
3646 BNX2X_STATE_OPENING_WAIT4_PORT):
3647 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3648 bp->state = BNX2X_STATE_FUNC_STARTED;
3649 break;
3650
3651 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3652 BNX2X_STATE_CLOSING_WAIT4_HALT):
3653 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3654 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3655 break;
3656
3657 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3658 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3659 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3660 bp->set_mac_pending = 0;
3661 break;
3662
3663 case (EVENT_RING_OPCODE_SET_MAC |
3664 BNX2X_STATE_CLOSING_WAIT4_HALT):
3665 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3666 bp->set_mac_pending = 0;
3667 break;
3668 default:
3669 /* unknown event log error and continue */
3670 BNX2X_ERR("Unknown EQ event %d\n",
3671 elem->message.opcode);
3672 }
3673next_spqe:
3674 spqe_cnt++;
3675 } /* for */
3676
3677 smp_mb__before_atomic_inc();
3678 atomic_add(spqe_cnt, &bp->spq_left);
3679
3680 bp->eq_cons = sw_cons;
3681 bp->eq_prod = sw_prod;
3682 /* Make sure that above mem writes were issued towards the memory */
3683 smp_wmb();
3684
3685 /* update producer */
3686 bnx2x_update_eq_prod(bp, bp->eq_prod);
3687}
3688
2601static void bnx2x_sp_task(struct work_struct *work) 3689static void bnx2x_sp_task(struct work_struct *work)
2602{ 3690{
2603 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 3691 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
@@ -2616,31 +3704,29 @@ static void bnx2x_sp_task(struct work_struct *work)
2616 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status); 3704 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2617 3705
2618 /* HW attentions */ 3706 /* HW attentions */
2619 if (status & 0x1) { 3707 if (status & BNX2X_DEF_SB_ATT_IDX) {
2620 bnx2x_attn_int(bp); 3708 bnx2x_attn_int(bp);
2621 status &= ~0x1; 3709 status &= ~BNX2X_DEF_SB_ATT_IDX;
2622 } 3710 }
2623 3711
2624 /* CStorm events: STAT_QUERY */ 3712 /* SP events: STAT_QUERY and others */
2625 if (status & 0x2) { 3713 if (status & BNX2X_DEF_SB_IDX) {
2626 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n"); 3714
2627 status &= ~0x2; 3715 /* Handle EQ completions */
3716 bnx2x_eq_int(bp);
3717
3718 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3719 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3720
3721 status &= ~BNX2X_DEF_SB_IDX;
2628 } 3722 }
2629 3723
2630 if (unlikely(status)) 3724 if (unlikely(status))
2631 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n", 3725 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2632 status); 3726 status);
2633 3727
2634 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), 3728 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
2635 IGU_INT_NOP, 1); 3729 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
2636 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2637 IGU_INT_NOP, 1);
2638 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2639 IGU_INT_NOP, 1);
2640 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2641 IGU_INT_NOP, 1);
2642 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2643 IGU_INT_ENABLE, 1);
2644} 3730}
2645 3731
2646irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 3732irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -2654,7 +3740,8 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2654 return IRQ_HANDLED; 3740 return IRQ_HANDLED;
2655 } 3741 }
2656 3742
2657 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0); 3743 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3744 IGU_INT_DISABLE, 0);
2658 3745
2659#ifdef BNX2X_STOP_ON_ERROR 3746#ifdef BNX2X_STOP_ON_ERROR
2660 if (unlikely(bp->panic)) 3747 if (unlikely(bp->panic))
@@ -2698,7 +3785,7 @@ static void bnx2x_timer(unsigned long data)
2698 } 3785 }
2699 3786
2700 if (!BP_NOMCP(bp)) { 3787 if (!BP_NOMCP(bp)) {
2701 int func = BP_FUNC(bp); 3788 int mb_idx = BP_FW_MB_IDX(bp);
2702 u32 drv_pulse; 3789 u32 drv_pulse;
2703 u32 mcp_pulse; 3790 u32 mcp_pulse;
2704 3791
@@ -2706,9 +3793,9 @@ static void bnx2x_timer(unsigned long data)
2706 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 3793 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2707 /* TBD - add SYSTEM_TIME */ 3794 /* TBD - add SYSTEM_TIME */
2708 drv_pulse = bp->fw_drv_pulse_wr_seq; 3795 drv_pulse = bp->fw_drv_pulse_wr_seq;
2709 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse); 3796 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
2710 3797
2711 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) & 3798 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
2712 MCP_PULSE_SEQ_MASK); 3799 MCP_PULSE_SEQ_MASK);
2713 /* The delta between driver pulse and mcp response 3800 /* The delta between driver pulse and mcp response
2714 * should be 1 (before mcp response) or 0 (after mcp response) 3801 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -2736,324 +3823,310 @@ timer_restart:
2736 * nic init service functions 3823 * nic init service functions
2737 */ 3824 */
2738 3825
2739static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id) 3826static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
2740{ 3827{
2741 int port = BP_PORT(bp); 3828 u32 i;
3829 if (!(len%4) && !(addr%4))
3830 for (i = 0; i < len; i += 4)
3831 REG_WR(bp, addr + i, fill);
3832 else
3833 for (i = 0; i < len; i++)
3834 REG_WR8(bp, addr + i, fill);
2742 3835
2743 /* "CSTORM" */
2744 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2745 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2746 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2747 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2748 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2749 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2750} 3836}
2751 3837
2752void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb, 3838/* helper: writes FP SP data to FW - data_size in dwords */
2753 dma_addr_t mapping, int sb_id) 3839static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3840 int fw_sb_id,
3841 u32 *sb_data_p,
3842 u32 data_size)
2754{ 3843{
2755 int port = BP_PORT(bp);
2756 int func = BP_FUNC(bp);
2757 int index; 3844 int index;
2758 u64 section; 3845 for (index = 0; index < data_size; index++)
3846 REG_WR(bp, BAR_CSTRORM_INTMEM +
3847 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3848 sizeof(u32)*index,
3849 *(sb_data_p + index));
3850}
3851
3852static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3853{
3854 u32 *sb_data_p;
3855 u32 data_size = 0;
3856 struct hc_status_block_data_e2 sb_data_e2;
3857 struct hc_status_block_data_e1x sb_data_e1x;
3858
3859 /* disable the function first */
3860 if (CHIP_IS_E2(bp)) {
3861 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3862 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3863 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3864 sb_data_e2.common.p_func.vf_valid = false;
3865 sb_data_p = (u32 *)&sb_data_e2;
3866 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3867 } else {
3868 memset(&sb_data_e1x, 0,
3869 sizeof(struct hc_status_block_data_e1x));
3870 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3871 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3872 sb_data_e1x.common.p_func.vf_valid = false;
3873 sb_data_p = (u32 *)&sb_data_e1x;
3874 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3875 }
3876 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
2759 3877
2760 /* USTORM */ 3878 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
2761 section = ((u64)mapping) + offsetof(struct host_status_block, 3879 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
2762 u_status_block); 3880 CSTORM_STATUS_BLOCK_SIZE);
2763 sb->u_status_block.status_block_id = sb_id; 3881 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
2764 3882 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
2765 REG_WR(bp, BAR_CSTRORM_INTMEM + 3883 CSTORM_SYNC_BLOCK_SIZE);
2766 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section)); 3884}
2767 REG_WR(bp, BAR_CSTRORM_INTMEM +
2768 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2769 U64_HI(section));
2770 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2771 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2772
2773 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2774 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2775 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2776 3885
2777 /* CSTORM */ 3886/* helper: writes SP SB data to FW */
2778 section = ((u64)mapping) + offsetof(struct host_status_block, 3887static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
2779 c_status_block); 3888 struct hc_sp_status_block_data *sp_sb_data)
2780 sb->c_status_block.status_block_id = sb_id; 3889{
3890 int func = BP_FUNC(bp);
3891 int i;
3892 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3893 REG_WR(bp, BAR_CSTRORM_INTMEM +
3894 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3895 i*sizeof(u32),
3896 *((u32 *)sp_sb_data + i));
3897}
2781 3898
2782 REG_WR(bp, BAR_CSTRORM_INTMEM + 3899static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
2783 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section)); 3900{
2784 REG_WR(bp, BAR_CSTRORM_INTMEM + 3901 int func = BP_FUNC(bp);
2785 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4), 3902 struct hc_sp_status_block_data sp_sb_data;
2786 U64_HI(section)); 3903 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
2787 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2788 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2789 3904
2790 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++) 3905 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
2791 REG_WR16(bp, BAR_CSTRORM_INTMEM + 3906 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
2792 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1); 3907 sp_sb_data.p_func.vf_valid = false;
3908
3909 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3910
3911 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3912 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3913 CSTORM_SP_STATUS_BLOCK_SIZE);
3914 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3915 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3916 CSTORM_SP_SYNC_BLOCK_SIZE);
2793 3917
2794 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2795} 3918}
2796 3919
2797static void bnx2x_zero_def_sb(struct bnx2x *bp) 3920
3921static inline
3922void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3923 int igu_sb_id, int igu_seg_id)
2798{ 3924{
2799 int func = BP_FUNC(bp); 3925 hc_sm->igu_sb_id = igu_sb_id;
3926 hc_sm->igu_seg_id = igu_seg_id;
3927 hc_sm->timer_value = 0xFF;
3928 hc_sm->time_to_expire = 0xFFFFFFFF;
3929}
2800 3930
2801 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY + 3931void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
2802 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 3932 u8 vf_valid, int fw_sb_id, int igu_sb_id)
2803 sizeof(struct tstorm_def_status_block)/4); 3933{
2804 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + 3934 int igu_seg_id;
2805 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0, 3935
2806 sizeof(struct cstorm_def_status_block_u)/4); 3936 struct hc_status_block_data_e2 sb_data_e2;
2807 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY + 3937 struct hc_status_block_data_e1x sb_data_e1x;
2808 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0, 3938 struct hc_status_block_sm *hc_sm_p;
2809 sizeof(struct cstorm_def_status_block_c)/4); 3939 struct hc_index_data *hc_index_p;
2810 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY + 3940 int data_size;
2811 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0, 3941 u32 *sb_data_p;
2812 sizeof(struct xstorm_def_status_block)/4); 3942
3943 if (CHIP_INT_MODE_IS_BC(bp))
3944 igu_seg_id = HC_SEG_ACCESS_NORM;
3945 else
3946 igu_seg_id = IGU_SEG_ACCESS_NORM;
3947
3948 bnx2x_zero_fp_sb(bp, fw_sb_id);
3949
3950 if (CHIP_IS_E2(bp)) {
3951 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3952 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3953 sb_data_e2.common.p_func.vf_id = vfid;
3954 sb_data_e2.common.p_func.vf_valid = vf_valid;
3955 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3956 sb_data_e2.common.same_igu_sb_1b = true;
3957 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3958 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3959 hc_sm_p = sb_data_e2.common.state_machine;
3960 hc_index_p = sb_data_e2.index_data;
3961 sb_data_p = (u32 *)&sb_data_e2;
3962 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3963 } else {
3964 memset(&sb_data_e1x, 0,
3965 sizeof(struct hc_status_block_data_e1x));
3966 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3967 sb_data_e1x.common.p_func.vf_id = 0xff;
3968 sb_data_e1x.common.p_func.vf_valid = false;
3969 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3970 sb_data_e1x.common.same_igu_sb_1b = true;
3971 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3972 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3973 hc_sm_p = sb_data_e1x.common.state_machine;
3974 hc_index_p = sb_data_e1x.index_data;
3975 sb_data_p = (u32 *)&sb_data_e1x;
3976 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3977 }
3978
3979 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3980 igu_sb_id, igu_seg_id);
3981 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3982 igu_sb_id, igu_seg_id);
3983
3984 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3985
3986 /* write indecies to HW */
3987 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3988}
3989
3990static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3991 u8 sb_index, u8 disable, u16 usec)
3992{
3993 int port = BP_PORT(bp);
3994 u8 ticks = usec / BNX2X_BTR;
3995
3996 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3997
3998 disable = disable ? 1 : (usec ? 0 : 1);
3999 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
2813} 4000}
2814 4001
2815static void bnx2x_init_def_sb(struct bnx2x *bp, 4002static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
2816 struct host_def_status_block *def_sb, 4003 u16 tx_usec, u16 rx_usec)
2817 dma_addr_t mapping, int sb_id)
2818{ 4004{
4005 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4006 false, rx_usec);
4007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4008 false, tx_usec);
4009}
4010
4011static void bnx2x_init_def_sb(struct bnx2x *bp)
4012{
4013 struct host_sp_status_block *def_sb = bp->def_status_blk;
4014 dma_addr_t mapping = bp->def_status_blk_mapping;
4015 int igu_sp_sb_index;
4016 int igu_seg_id;
2819 int port = BP_PORT(bp); 4017 int port = BP_PORT(bp);
2820 int func = BP_FUNC(bp); 4018 int func = BP_FUNC(bp);
2821 int index, val, reg_offset; 4019 int reg_offset;
2822 u64 section; 4020 u64 section;
4021 int index;
4022 struct hc_sp_status_block_data sp_sb_data;
4023 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4024
4025 if (CHIP_INT_MODE_IS_BC(bp)) {
4026 igu_sp_sb_index = DEF_SB_IGU_ID;
4027 igu_seg_id = HC_SEG_ACCESS_DEF;
4028 } else {
4029 igu_sp_sb_index = bp->igu_dsb_id;
4030 igu_seg_id = IGU_SEG_ACCESS_DEF;
4031 }
2823 4032
2824 /* ATTN */ 4033 /* ATTN */
2825 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4034 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
2826 atten_status_block); 4035 atten_status_block);
2827 def_sb->atten_status_block.status_block_id = sb_id; 4036 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
2828 4037
2829 bp->attn_state = 0; 4038 bp->attn_state = 0;
2830 4039
2831 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4040 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2832 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 4041 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2833
2834 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 4042 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2835 bp->attn_group[index].sig[0] = REG_RD(bp, 4043 int sindex;
2836 reg_offset + 0x10*index); 4044 /* take care of sig[0]..sig[4] */
2837 bp->attn_group[index].sig[1] = REG_RD(bp, 4045 for (sindex = 0; sindex < 4; sindex++)
2838 reg_offset + 0x4 + 0x10*index); 4046 bp->attn_group[index].sig[sindex] =
2839 bp->attn_group[index].sig[2] = REG_RD(bp, 4047 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
2840 reg_offset + 0x8 + 0x10*index); 4048
2841 bp->attn_group[index].sig[3] = REG_RD(bp, 4049 if (CHIP_IS_E2(bp))
2842 reg_offset + 0xc + 0x10*index); 4050 /*
4051 * enable5 is separate from the rest of the registers,
4052 * and therefore the address skip is 4
4053 * and not 16 between the different groups
4054 */
4055 bp->attn_group[index].sig[4] = REG_RD(bp,
4056 reg_offset + 0x10 + 0x4*index);
4057 else
4058 bp->attn_group[index].sig[4] = 0;
2843 } 4059 }
2844 4060
2845 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4061 if (bp->common.int_block == INT_BLOCK_HC) {
2846 HC_REG_ATTN_MSG0_ADDR_L); 4062 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2847 4063 HC_REG_ATTN_MSG0_ADDR_L);
2848 REG_WR(bp, reg_offset, U64_LO(section));
2849 REG_WR(bp, reg_offset + 4, U64_HI(section));
2850
2851 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2852 4064
2853 val = REG_RD(bp, reg_offset); 4065 REG_WR(bp, reg_offset, U64_LO(section));
2854 val |= sb_id; 4066 REG_WR(bp, reg_offset + 4, U64_HI(section));
2855 REG_WR(bp, reg_offset, val); 4067 } else if (CHIP_IS_E2(bp)) {
4068 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4069 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4070 }
2856 4071
2857 /* USTORM */ 4072 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
2858 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4073 sp_sb);
2859 u_def_status_block);
2860 def_sb->u_def_status_block.status_block_id = sb_id;
2861
2862 REG_WR(bp, BAR_CSTRORM_INTMEM +
2863 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2864 REG_WR(bp, BAR_CSTRORM_INTMEM +
2865 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2866 U64_HI(section));
2867 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2868 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2869
2870 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2871 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2872 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2873 4074
2874 /* CSTORM */ 4075 bnx2x_zero_sp_sb(bp);
2875 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2876 c_def_status_block);
2877 def_sb->c_def_status_block.status_block_id = sb_id;
2878
2879 REG_WR(bp, BAR_CSTRORM_INTMEM +
2880 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2881 REG_WR(bp, BAR_CSTRORM_INTMEM +
2882 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2883 U64_HI(section));
2884 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2885 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2886
2887 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2888 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2889 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2890 4076
2891 /* TSTORM */ 4077 sp_sb_data.host_sb_addr.lo = U64_LO(section);
2892 section = ((u64)mapping) + offsetof(struct host_def_status_block, 4078 sp_sb_data.host_sb_addr.hi = U64_HI(section);
2893 t_def_status_block); 4079 sp_sb_data.igu_sb_id = igu_sp_sb_index;
2894 def_sb->t_def_status_block.status_block_id = sb_id; 4080 sp_sb_data.igu_seg_id = igu_seg_id;
2895 4081 sp_sb_data.p_func.pf_id = func;
2896 REG_WR(bp, BAR_TSTRORM_INTMEM + 4082 sp_sb_data.p_func.vnic_id = BP_VN(bp);
2897 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4083 sp_sb_data.p_func.vf_id = 0xff;
2898 REG_WR(bp, BAR_TSTRORM_INTMEM +
2899 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2900 U64_HI(section));
2901 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2902 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2903
2904 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2905 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2906 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2907 4084
2908 /* XSTORM */ 4085 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
2909 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2910 x_def_status_block);
2911 def_sb->x_def_status_block.status_block_id = sb_id;
2912
2913 REG_WR(bp, BAR_XSTRORM_INTMEM +
2914 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2915 REG_WR(bp, BAR_XSTRORM_INTMEM +
2916 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2917 U64_HI(section));
2918 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2919 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2920
2921 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2922 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2923 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2924 4086
2925 bp->stats_pending = 0; 4087 bp->stats_pending = 0;
2926 bp->set_mac_pending = 0; 4088 bp->set_mac_pending = 0;
2927 4089
2928 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4090 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
2929} 4091}
2930 4092
2931void bnx2x_update_coalesce(struct bnx2x *bp) 4093void bnx2x_update_coalesce(struct bnx2x *bp)
2932{ 4094{
2933 int port = BP_PORT(bp);
2934 int i; 4095 int i;
2935 4096
2936 for_each_queue(bp, i) { 4097 for_each_queue(bp, i)
2937 int sb_id = bp->fp[i].sb_id; 4098 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
2938 4099 bp->rx_ticks, bp->tx_ticks);
2939 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2940 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2941 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2942 U_SB_ETH_RX_CQ_INDEX),
2943 bp->rx_ticks/(4 * BNX2X_BTR));
2944 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2945 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2946 U_SB_ETH_RX_CQ_INDEX),
2947 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2948
2949 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2950 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2951 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2952 C_SB_ETH_TX_CQ_INDEX),
2953 bp->tx_ticks/(4 * BNX2X_BTR));
2954 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2955 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2956 C_SB_ETH_TX_CQ_INDEX),
2957 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2958 }
2959} 4100}
2960 4101
2961static void bnx2x_init_sp_ring(struct bnx2x *bp) 4102static void bnx2x_init_sp_ring(struct bnx2x *bp)
2962{ 4103{
2963 int func = BP_FUNC(bp);
2964
2965 spin_lock_init(&bp->spq_lock); 4104 spin_lock_init(&bp->spq_lock);
4105 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
2966 4106
2967 bp->spq_left = MAX_SPQ_PENDING;
2968 bp->spq_prod_idx = 0; 4107 bp->spq_prod_idx = 0;
2969 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 4108 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2970 bp->spq_prod_bd = bp->spq; 4109 bp->spq_prod_bd = bp->spq;
2971 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 4110 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2972
2973 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2974 U64_LO(bp->spq_mapping));
2975 REG_WR(bp,
2976 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2977 U64_HI(bp->spq_mapping));
2978
2979 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2980 bp->spq_prod_idx);
2981} 4111}
2982 4112
2983static void bnx2x_init_context(struct bnx2x *bp) 4113static void bnx2x_init_eq_ring(struct bnx2x *bp)
2984{ 4114{
2985 int i; 4115 int i;
4116 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4117 union event_ring_elem *elem =
4118 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
2986 4119
2987 /* Rx */ 4120 elem->next_page.addr.hi =
2988 for_each_queue(bp, i) { 4121 cpu_to_le32(U64_HI(bp->eq_mapping +
2989 struct eth_context *context = bnx2x_sp(bp, context[i].eth); 4122 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
2990 struct bnx2x_fastpath *fp = &bp->fp[i]; 4123 elem->next_page.addr.lo =
2991 u8 cl_id = fp->cl_id; 4124 cpu_to_le32(U64_LO(bp->eq_mapping +
2992 4125 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
2993 context->ustorm_st_context.common.sb_index_numbers =
2994 BNX2X_RX_SB_INDEX_NUM;
2995 context->ustorm_st_context.common.clientId = cl_id;
2996 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2997 context->ustorm_st_context.common.flags =
2998 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2999 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
3000 context->ustorm_st_context.common.statistics_counter_id =
3001 cl_id;
3002 context->ustorm_st_context.common.mc_alignment_log_size =
3003 BNX2X_RX_ALIGN_SHIFT;
3004 context->ustorm_st_context.common.bd_buff_size =
3005 bp->rx_buf_size;
3006 context->ustorm_st_context.common.bd_page_base_hi =
3007 U64_HI(fp->rx_desc_mapping);
3008 context->ustorm_st_context.common.bd_page_base_lo =
3009 U64_LO(fp->rx_desc_mapping);
3010 if (!fp->disable_tpa) {
3011 context->ustorm_st_context.common.flags |=
3012 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
3013 context->ustorm_st_context.common.sge_buff_size =
3014 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
3015 0xffff);
3016 context->ustorm_st_context.common.sge_page_base_hi =
3017 U64_HI(fp->rx_sge_mapping);
3018 context->ustorm_st_context.common.sge_page_base_lo =
3019 U64_LO(fp->rx_sge_mapping);
3020
3021 context->ustorm_st_context.common.max_sges_for_packet =
3022 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3023 context->ustorm_st_context.common.max_sges_for_packet =
3024 ((context->ustorm_st_context.common.
3025 max_sges_for_packet + PAGES_PER_SGE - 1) &
3026 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3027 }
3028
3029 context->ustorm_ag_context.cdu_usage =
3030 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3031 CDU_REGION_NUMBER_UCM_AG,
3032 ETH_CONNECTION_TYPE);
3033
3034 context->xstorm_ag_context.cdu_reserved =
3035 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3036 CDU_REGION_NUMBER_XCM_AG,
3037 ETH_CONNECTION_TYPE);
3038 }
3039
3040 /* Tx */
3041 for_each_queue(bp, i) {
3042 struct bnx2x_fastpath *fp = &bp->fp[i];
3043 struct eth_context *context =
3044 bnx2x_sp(bp, context[i].eth);
3045
3046 context->cstorm_st_context.sb_index_number =
3047 C_SB_ETH_TX_CQ_INDEX;
3048 context->cstorm_st_context.status_block_id = fp->sb_id;
3049
3050 context->xstorm_st_context.tx_bd_page_base_hi =
3051 U64_HI(fp->tx_desc_mapping);
3052 context->xstorm_st_context.tx_bd_page_base_lo =
3053 U64_LO(fp->tx_desc_mapping);
3054 context->xstorm_st_context.statistics_data = (fp->cl_id |
3055 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3056 } 4126 }
4127 bp->eq_cons = 0;
4128 bp->eq_prod = NUM_EQ_DESC;
4129 bp->eq_cons_sb = BNX2X_EQ_INDEX;
3057} 4130}
3058 4131
3059static void bnx2x_init_ind_table(struct bnx2x *bp) 4132static void bnx2x_init_ind_table(struct bnx2x *bp)
@@ -3072,47 +4145,11 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
3072 bp->fp->cl_id + (i % bp->num_queues)); 4145 bp->fp->cl_id + (i % bp->num_queues));
3073} 4146}
3074 4147
3075void bnx2x_set_client_config(struct bnx2x *bp)
3076{
3077 struct tstorm_eth_client_config tstorm_client = {0};
3078 int port = BP_PORT(bp);
3079 int i;
3080
3081 tstorm_client.mtu = bp->dev->mtu;
3082 tstorm_client.config_flags =
3083 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3084 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3085#ifdef BCM_VLAN
3086 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3087 tstorm_client.config_flags |=
3088 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3089 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3090 }
3091#endif
3092
3093 for_each_queue(bp, i) {
3094 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3095
3096 REG_WR(bp, BAR_TSTRORM_INTMEM +
3097 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3098 ((u32 *)&tstorm_client)[0]);
3099 REG_WR(bp, BAR_TSTRORM_INTMEM +
3100 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3101 ((u32 *)&tstorm_client)[1]);
3102 }
3103
3104 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3105 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3106}
3107
3108void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4148void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3109{ 4149{
3110 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3111 int mode = bp->rx_mode; 4150 int mode = bp->rx_mode;
3112 int mask = bp->rx_mode_cl_mask; 4151 u16 cl_id;
3113 int func = BP_FUNC(bp); 4152
3114 int port = BP_PORT(bp);
3115 int i;
3116 /* All but management unicast packets should pass to the host as well */ 4153 /* All but management unicast packets should pass to the host as well */
3117 u32 llh_mask = 4154 u32 llh_mask =
3118 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | 4155 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
@@ -3120,28 +4157,32 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3120 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | 4157 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3121 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; 4158 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3122 4159
3123 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
3124
3125 switch (mode) { 4160 switch (mode) {
3126 case BNX2X_RX_MODE_NONE: /* no Rx */ 4161 case BNX2X_RX_MODE_NONE: /* no Rx */
3127 tstorm_mac_filter.ucast_drop_all = mask; 4162 cl_id = BP_L_ID(bp);
3128 tstorm_mac_filter.mcast_drop_all = mask; 4163 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
3129 tstorm_mac_filter.bcast_drop_all = mask;
3130 break; 4164 break;
3131 4165
3132 case BNX2X_RX_MODE_NORMAL: 4166 case BNX2X_RX_MODE_NORMAL:
3133 tstorm_mac_filter.bcast_accept_all = mask; 4167 cl_id = BP_L_ID(bp);
4168 bnx2x_rxq_set_mac_filters(bp, cl_id,
4169 BNX2X_ACCEPT_UNICAST |
4170 BNX2X_ACCEPT_BROADCAST |
4171 BNX2X_ACCEPT_MULTICAST);
3134 break; 4172 break;
3135 4173
3136 case BNX2X_RX_MODE_ALLMULTI: 4174 case BNX2X_RX_MODE_ALLMULTI:
3137 tstorm_mac_filter.mcast_accept_all = mask; 4175 cl_id = BP_L_ID(bp);
3138 tstorm_mac_filter.bcast_accept_all = mask; 4176 bnx2x_rxq_set_mac_filters(bp, cl_id,
4177 BNX2X_ACCEPT_UNICAST |
4178 BNX2X_ACCEPT_BROADCAST |
4179 BNX2X_ACCEPT_ALL_MULTICAST);
3139 break; 4180 break;
3140 4181
3141 case BNX2X_RX_MODE_PROMISC: 4182 case BNX2X_RX_MODE_PROMISC:
3142 tstorm_mac_filter.ucast_accept_all = mask; 4183 cl_id = BP_L_ID(bp);
3143 tstorm_mac_filter.mcast_accept_all = mask; 4184 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
3144 tstorm_mac_filter.bcast_accept_all = mask; 4185
3145 /* pass management unicast packets as well */ 4186 /* pass management unicast packets as well */
3146 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4187 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3147 break; 4188 break;
@@ -3152,262 +4193,64 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3152 } 4193 }
3153 4194
3154 REG_WR(bp, 4195 REG_WR(bp,
3155 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK), 4196 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4197 NIG_REG_LLH0_BRB1_DRV_MASK,
3156 llh_mask); 4198 llh_mask);
3157 4199
3158 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) { 4200 DP(NETIF_MSG_IFUP, "rx mode %d\n"
3159 REG_WR(bp, BAR_TSTRORM_INTMEM + 4201 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
3160 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4, 4202 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
3161 ((u32 *)&tstorm_mac_filter)[i]); 4203 bp->mac_filters.ucast_drop_all,
4204 bp->mac_filters.mcast_drop_all,
4205 bp->mac_filters.bcast_drop_all,
4206 bp->mac_filters.ucast_accept_all,
4207 bp->mac_filters.mcast_accept_all,
4208 bp->mac_filters.bcast_accept_all
4209 );
3162 4210
3163/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, 4211 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
3164 ((u32 *)&tstorm_mac_filter)[i]); */
3165 }
3166
3167 if (mode != BNX2X_RX_MODE_NONE)
3168 bnx2x_set_client_config(bp);
3169} 4212}
3170 4213
3171static void bnx2x_init_internal_common(struct bnx2x *bp) 4214static void bnx2x_init_internal_common(struct bnx2x *bp)
3172{ 4215{
3173 int i; 4216 int i;
3174 4217
3175 /* Zero this manually as its initialization is 4218 if (!CHIP_IS_E1(bp)) {
3176 currently missing in the initTool */
3177 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3178 REG_WR(bp, BAR_USTRORM_INTMEM +
3179 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3180}
3181
3182static void bnx2x_init_internal_port(struct bnx2x *bp)
3183{
3184 int port = BP_PORT(bp);
3185
3186 REG_WR(bp,
3187 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3188 REG_WR(bp,
3189 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3190 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3191 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3192}
3193
3194static void bnx2x_init_internal_func(struct bnx2x *bp)
3195{
3196 struct tstorm_eth_function_common_config tstorm_config = {0};
3197 struct stats_indication_flags stats_flags = {0};
3198 int port = BP_PORT(bp);
3199 int func = BP_FUNC(bp);
3200 int i, j;
3201 u32 offset;
3202 u16 max_agg_size;
3203
3204 tstorm_config.config_flags = RSS_FLAGS(bp);
3205
3206 if (is_multi(bp))
3207 tstorm_config.rss_result_mask = MULTI_MASK;
3208
3209 /* Enable TPA if needed */
3210 if (bp->flags & TPA_ENABLE_FLAG)
3211 tstorm_config.config_flags |=
3212 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3213
3214 if (IS_E1HMF(bp))
3215 tstorm_config.config_flags |=
3216 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3217
3218 tstorm_config.leading_client_id = BP_L_ID(bp);
3219
3220 REG_WR(bp, BAR_TSTRORM_INTMEM +
3221 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3222 (*(u32 *)&tstorm_config));
3223
3224 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3225 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3226 bnx2x_set_storm_rx_mode(bp);
3227 4219
3228 for_each_queue(bp, i) { 4220 /* xstorm needs to know whether to add ovlan to packets or not,
3229 u8 cl_id = bp->fp[i].cl_id; 4221 * in switch-independent we'll write 0 to here... */
3230
3231 /* reset xstorm per client statistics */
3232 offset = BAR_XSTRORM_INTMEM +
3233 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3234 for (j = 0;
3235 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3236 REG_WR(bp, offset + j*4, 0);
3237
3238 /* reset tstorm per client statistics */
3239 offset = BAR_TSTRORM_INTMEM +
3240 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3241 for (j = 0;
3242 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3243 REG_WR(bp, offset + j*4, 0);
3244
3245 /* reset ustorm per client statistics */
3246 offset = BAR_USTRORM_INTMEM +
3247 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3248 for (j = 0;
3249 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3250 REG_WR(bp, offset + j*4, 0);
3251 }
3252
3253 /* Init statistics related context */
3254 stats_flags.collect_eth = 1;
3255
3256 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3257 ((u32 *)&stats_flags)[0]);
3258 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3259 ((u32 *)&stats_flags)[1]);
3260
3261 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3262 ((u32 *)&stats_flags)[0]);
3263 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3264 ((u32 *)&stats_flags)[1]);
3265
3266 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3267 ((u32 *)&stats_flags)[0]);
3268 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3269 ((u32 *)&stats_flags)[1]);
3270
3271 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3272 ((u32 *)&stats_flags)[0]);
3273 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3274 ((u32 *)&stats_flags)[1]);
3275
3276 REG_WR(bp, BAR_XSTRORM_INTMEM +
3277 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3278 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3279 REG_WR(bp, BAR_XSTRORM_INTMEM +
3280 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3281 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3282
3283 REG_WR(bp, BAR_TSTRORM_INTMEM +
3284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3285 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3286 REG_WR(bp, BAR_TSTRORM_INTMEM +
3287 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3288 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3289
3290 REG_WR(bp, BAR_USTRORM_INTMEM +
3291 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3292 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3293 REG_WR(bp, BAR_USTRORM_INTMEM +
3294 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3295 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3296
3297 if (CHIP_IS_E1H(bp)) {
3298 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4222 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3299 IS_E1HMF(bp)); 4223 bp->mf_mode);
3300 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, 4224 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3301 IS_E1HMF(bp)); 4225 bp->mf_mode);
3302 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, 4226 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3303 IS_E1HMF(bp)); 4227 bp->mf_mode);
3304 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, 4228 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3305 IS_E1HMF(bp)); 4229 bp->mf_mode);
3306
3307 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3308 bp->e1hov);
3309 } 4230 }
3310 4231
3311 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ 4232 /* Zero this manually as its initialization is
3312 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * 4233 currently missing in the initTool */
3313 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff); 4234 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3314 for_each_queue(bp, i) {
3315 struct bnx2x_fastpath *fp = &bp->fp[i];
3316
3317 REG_WR(bp, BAR_USTRORM_INTMEM +
3318 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3319 U64_LO(fp->rx_comp_mapping));
3320 REG_WR(bp, BAR_USTRORM_INTMEM +
3321 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3322 U64_HI(fp->rx_comp_mapping));
3323
3324 /* Next page */
3325 REG_WR(bp, BAR_USTRORM_INTMEM +
3326 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3327 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3328 REG_WR(bp, BAR_USTRORM_INTMEM + 4235 REG_WR(bp, BAR_USTRORM_INTMEM +
3329 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4, 4236 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3330 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE)); 4237 if (CHIP_IS_E2(bp)) {
3331 4238 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
3332 REG_WR16(bp, BAR_USTRORM_INTMEM + 4239 CHIP_INT_MODE_IS_BC(bp) ?
3333 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id), 4240 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
3334 max_agg_size);
3335 }
3336
3337 /* dropless flow control */
3338 if (CHIP_IS_E1H(bp)) {
3339 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3340
3341 rx_pause.bd_thr_low = 250;
3342 rx_pause.cqe_thr_low = 250;
3343 rx_pause.cos = 1;
3344 rx_pause.sge_thr_low = 0;
3345 rx_pause.bd_thr_high = 350;
3346 rx_pause.cqe_thr_high = 350;
3347 rx_pause.sge_thr_high = 0;
3348
3349 for_each_queue(bp, i) {
3350 struct bnx2x_fastpath *fp = &bp->fp[i];
3351
3352 if (!fp->disable_tpa) {
3353 rx_pause.sge_thr_low = 150;
3354 rx_pause.sge_thr_high = 250;
3355 }
3356
3357
3358 offset = BAR_USTRORM_INTMEM +
3359 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3360 fp->cl_id);
3361 for (j = 0;
3362 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3363 j++)
3364 REG_WR(bp, offset + j*4,
3365 ((u32 *)&rx_pause)[j]);
3366 }
3367 }
3368
3369 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3370
3371 /* Init rate shaping and fairness contexts */
3372 if (IS_E1HMF(bp)) {
3373 int vn;
3374
3375 /* During init there is no active link
3376 Until link is up, set link rate to 10Gbps */
3377 bp->link_vars.line_speed = SPEED_10000;
3378 bnx2x_init_port_minmax(bp);
3379
3380 if (!BP_NOMCP(bp))
3381 bp->mf_config =
3382 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3383 bnx2x_calc_vn_weight_sum(bp);
3384
3385 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3386 bnx2x_init_vn_minmax(bp, 2*vn + port);
3387
3388 /* Enable rate shaping and fairness */
3389 bp->cmng.flags.cmng_enables |=
3390 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3391
3392 } else {
3393 /* rate shaping and fairness are disabled */
3394 DP(NETIF_MSG_IFUP,
3395 "single function mode minmax will be disabled\n");
3396 } 4241 }
4242}
3397 4243
3398 4244static void bnx2x_init_internal_port(struct bnx2x *bp)
3399 /* Store cmng structures to internal memory */ 4245{
3400 if (bp->port.pmf) 4246 /* port */
3401 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3402 REG_WR(bp, BAR_XSTRORM_INTMEM +
3403 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3404 ((u32 *)(&bp->cmng))[i]);
3405} 4247}
3406 4248
3407static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 4249static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3408{ 4250{
3409 switch (load_code) { 4251 switch (load_code) {
3410 case FW_MSG_CODE_DRV_LOAD_COMMON: 4252 case FW_MSG_CODE_DRV_LOAD_COMMON:
4253 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
3411 bnx2x_init_internal_common(bp); 4254 bnx2x_init_internal_common(bp);
3412 /* no break */ 4255 /* no break */
3413 4256
@@ -3416,7 +4259,8 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3416 /* no break */ 4259 /* no break */
3417 4260
3418 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 4261 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3419 bnx2x_init_internal_func(bp); 4262 /* internal memory per function is
4263 initialized inside bnx2x_pf_init */
3420 break; 4264 break;
3421 4265
3422 default: 4266 default:
@@ -3425,43 +4269,63 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3425 } 4269 }
3426} 4270}
3427 4271
4272static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4273{
4274 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4275
4276 fp->state = BNX2X_FP_STATE_CLOSED;
4277
4278 fp->index = fp->cid = fp_idx;
4279 fp->cl_id = BP_L_ID(bp) + fp_idx;
4280 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4281 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4282 /* qZone id equals to FW (per path) client id */
4283 fp->cl_qzone_id = fp->cl_id +
4284 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4285 ETH_MAX_RX_CLIENTS_E1H);
4286 /* init shortcut */
4287 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4288 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
4289 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4290 /* Setup SB indicies */
4291 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4292 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4293
4294 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4295 "cl_id %d fw_sb %d igu_sb %d\n",
4296 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4297 fp->igu_sb_id);
4298 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4299 fp->fw_sb_id, fp->igu_sb_id);
4300
4301 bnx2x_update_fpsb_idx(fp);
4302}
4303
3428void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 4304void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3429{ 4305{
3430 int i; 4306 int i;
3431 4307
3432 for_each_queue(bp, i) { 4308 for_each_queue(bp, i)
3433 struct bnx2x_fastpath *fp = &bp->fp[i]; 4309 bnx2x_init_fp_sb(bp, i);
3434
3435 fp->bp = bp;
3436 fp->state = BNX2X_FP_STATE_CLOSED;
3437 fp->index = i;
3438 fp->cl_id = BP_L_ID(bp) + i;
3439#ifdef BCM_CNIC 4310#ifdef BCM_CNIC
3440 fp->sb_id = fp->cl_id + 1; 4311
3441#else 4312 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
3442 fp->sb_id = fp->cl_id; 4313 BNX2X_VF_ID_INVALID, false,
4314 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4315
3443#endif 4316#endif
3444 DP(NETIF_MSG_IFUP,
3445 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
3446 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3447 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3448 fp->sb_id);
3449 bnx2x_update_fpsb_idx(fp);
3450 }
3451 4317
3452 /* ensure status block indices were read */ 4318 /* ensure status block indices were read */
3453 rmb(); 4319 rmb();
3454 4320
3455 4321 bnx2x_init_def_sb(bp);
3456 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3457 DEF_SB_ID);
3458 bnx2x_update_dsb_idx(bp); 4322 bnx2x_update_dsb_idx(bp);
3459 bnx2x_update_coalesce(bp);
3460 bnx2x_init_rx_rings(bp); 4323 bnx2x_init_rx_rings(bp);
3461 bnx2x_init_tx_ring(bp); 4324 bnx2x_init_tx_rings(bp);
3462 bnx2x_init_sp_ring(bp); 4325 bnx2x_init_sp_ring(bp);
3463 bnx2x_init_context(bp); 4326 bnx2x_init_eq_ring(bp);
3464 bnx2x_init_internal(bp, load_code); 4327 bnx2x_init_internal(bp, load_code);
4328 bnx2x_pf_init(bp);
3465 bnx2x_init_ind_table(bp); 4329 bnx2x_init_ind_table(bp);
3466 bnx2x_stats_init(bp); 4330 bnx2x_stats_init(bp);
3467 4331
@@ -3522,7 +4386,6 @@ gunzip_nomem1:
3522static void bnx2x_gunzip_end(struct bnx2x *bp) 4386static void bnx2x_gunzip_end(struct bnx2x *bp)
3523{ 4387{
3524 kfree(bp->strm->workspace); 4388 kfree(bp->strm->workspace);
3525
3526 kfree(bp->strm); 4389 kfree(bp->strm);
3527 bp->strm = NULL; 4390 bp->strm = NULL;
3528 4391
@@ -3620,8 +4483,6 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
3620 else 4483 else
3621 factor = 1; 4484 factor = 1;
3622 4485
3623 DP(NETIF_MSG_HW, "start part1\n");
3624
3625 /* Disable inputs of parser neighbor blocks */ 4486 /* Disable inputs of parser neighbor blocks */
3626 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4487 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3627 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4488 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
@@ -3758,9 +4619,19 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
3758static void enable_blocks_attention(struct bnx2x *bp) 4619static void enable_blocks_attention(struct bnx2x *bp)
3759{ 4620{
3760 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4621 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3761 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); 4622 if (CHIP_IS_E2(bp))
4623 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4624 else
4625 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3762 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 4626 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3763 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); 4627 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4628 /*
4629 * mask read length error interrupts in brb for parser
4630 * (parsing unit and 'checksum and crc' unit)
4631 * these errors are legal (PU reads fixed length and CAC can cause
4632 * read length error on truncated packets)
4633 */
4634 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
3764 REG_WR(bp, QM_REG_QM_INT_MASK, 0); 4635 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3765 REG_WR(bp, TM_REG_TM_INT_MASK, 0); 4636 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3766 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0); 4637 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
@@ -3779,8 +4650,16 @@ static void enable_blocks_attention(struct bnx2x *bp)
3779 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0); 4650 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3780/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 4651/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3781/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 4652/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4653
3782 if (CHIP_REV_IS_FPGA(bp)) 4654 if (CHIP_REV_IS_FPGA(bp))
3783 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 4655 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4656 else if (CHIP_IS_E2(bp))
4657 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4658 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4659 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4660 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4661 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4662 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
3784 else 4663 else
3785 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000); 4664 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3786 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 4665 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
@@ -3798,34 +4677,34 @@ static const struct {
3798 u32 addr; 4677 u32 addr;
3799 u32 mask; 4678 u32 mask;
3800} bnx2x_parity_mask[] = { 4679} bnx2x_parity_mask[] = {
3801 {PXP_REG_PXP_PRTY_MASK, 0xffffffff}, 4680 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
3802 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff}, 4681 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3803 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff}, 4682 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
3804 {HC_REG_HC_PRTY_MASK, 0xffffffff}, 4683 {HC_REG_HC_PRTY_MASK, 0x7},
3805 {MISC_REG_MISC_PRTY_MASK, 0xffffffff}, 4684 {MISC_REG_MISC_PRTY_MASK, 0x1},
3806 {QM_REG_QM_PRTY_MASK, 0x0}, 4685 {QM_REG_QM_PRTY_MASK, 0x0},
3807 {DORQ_REG_DORQ_PRTY_MASK, 0x0}, 4686 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3808 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0}, 4687 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3809 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0}, 4688 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3810 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */ 4689 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3811 {CDU_REG_CDU_PRTY_MASK, 0x0}, 4690 {CDU_REG_CDU_PRTY_MASK, 0x0},
3812 {CFC_REG_CFC_PRTY_MASK, 0x0}, 4691 {CFC_REG_CFC_PRTY_MASK, 0x0},
3813 {DBG_REG_DBG_PRTY_MASK, 0x0}, 4692 {DBG_REG_DBG_PRTY_MASK, 0x0},
3814 {DMAE_REG_DMAE_PRTY_MASK, 0x0}, 4693 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3815 {BRB1_REG_BRB1_PRTY_MASK, 0x0}, 4694 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3816 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */ 4695 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3817 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */ 4696 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
3818 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */ 4697 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
3819 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */ 4698 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
3820 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */ 4699 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
3821 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0}, 4700 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3822 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0}, 4701 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3823 {USEM_REG_USEM_PRTY_MASK_0, 0x0}, 4702 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3824 {USEM_REG_USEM_PRTY_MASK_1, 0x0}, 4703 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3825 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0}, 4704 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3826 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0}, 4705 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3827 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0}, 4706 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3828 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0} 4707 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3829}; 4708};
3830 4709
3831static void enable_blocks_parity(struct bnx2x *bp) 4710static void enable_blocks_parity(struct bnx2x *bp)
@@ -3917,26 +4796,97 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3917 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 4796 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3918} 4797}
3919 4798
3920static int bnx2x_init_common(struct bnx2x *bp) 4799static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4800{
4801 u32 offset = 0;
4802
4803 if (CHIP_IS_E1(bp))
4804 return;
4805 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4806 return;
4807
4808 switch (BP_ABS_FUNC(bp)) {
4809 case 0:
4810 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4811 break;
4812 case 1:
4813 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4814 break;
4815 case 2:
4816 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4817 break;
4818 case 3:
4819 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4820 break;
4821 case 4:
4822 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4823 break;
4824 case 5:
4825 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4826 break;
4827 case 6:
4828 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4829 break;
4830 case 7:
4831 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4832 break;
4833 default:
4834 return;
4835 }
4836
4837 REG_WR(bp, offset, pretend_func_num);
4838 REG_RD(bp, offset);
4839 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4840}
4841
4842static void bnx2x_pf_disable(struct bnx2x *bp)
4843{
4844 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4845 val &= ~IGU_PF_CONF_FUNC_EN;
4846
4847 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4848 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4849 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4850}
4851
4852static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
3921{ 4853{
3922 u32 val, i; 4854 u32 val, i;
3923#ifdef BCM_CNIC
3924 u32 wb_write[2];
3925#endif
3926 4855
3927 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 4856 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
3928 4857
3929 bnx2x_reset_common(bp); 4858 bnx2x_reset_common(bp);
3930 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 4859 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3931 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 4860 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3932 4861
3933 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); 4862 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3934 if (CHIP_IS_E1H(bp)) 4863 if (!CHIP_IS_E1(bp))
3935 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp)); 4864 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
4865
4866 if (CHIP_IS_E2(bp)) {
4867 u8 fid;
4868
4869 /**
4870 * 4-port mode or 2-port mode we need to turn of master-enable
4871 * for everyone, after that, turn it back on for self.
4872 * so, we disregard multi-function or not, and always disable
4873 * for all functions on the given path, this means 0,2,4,6 for
4874 * path 0 and 1,3,5,7 for path 1
4875 */
4876 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4877 if (fid == BP_ABS_FUNC(bp)) {
4878 REG_WR(bp,
4879 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4880 1);
4881 continue;
4882 }
3936 4883
3937 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); 4884 bnx2x_pretend_func(bp, fid);
3938 msleep(30); 4885 /* clear pf enable */
3939 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0); 4886 bnx2x_pf_disable(bp);
4887 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4888 }
4889 }
3940 4890
3941 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); 4891 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3942 if (CHIP_IS_E1(bp)) { 4892 if (CHIP_IS_E1(bp)) {
@@ -3964,12 +4914,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
3964 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 4914 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3965#endif 4915#endif
3966 4916
3967 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 4917 bnx2x_ilt_init_page_size(bp, INITOP_SET);
3968#ifdef BCM_CNIC
3969 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3970 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3971 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3972#endif
3973 4918
3974 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 4919 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3975 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1); 4920 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
@@ -3988,9 +4933,65 @@ static int bnx2x_init_common(struct bnx2x *bp)
3988 return -EBUSY; 4933 return -EBUSY;
3989 } 4934 }
3990 4935
4936 /* Timers bug workaround E2 only. We need to set the entire ILT to
4937 * have entries with value "0" and valid bit on.
4938 * This needs to be done by the first PF that is loaded in a path
4939 * (i.e. common phase)
4940 */
4941 if (CHIP_IS_E2(bp)) {
4942 struct ilt_client_info ilt_cli;
4943 struct bnx2x_ilt ilt;
4944 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4945 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4946
4947 /* initalize dummy TM client */
4948 ilt_cli.start = 0;
4949 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4950 ilt_cli.client_num = ILT_CLIENT_TM;
4951
4952 /* Step 1: set zeroes to all ilt page entries with valid bit on
4953 * Step 2: set the timers first/last ilt entry to point
4954 * to the entire range to prevent ILT range error for 3rd/4th
4955 * vnic (this code assumes existance of the vnic)
4956 *
4957 * both steps performed by call to bnx2x_ilt_client_init_op()
4958 * with dummy TM client
4959 *
4960 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4961 * and his brother are split registers
4962 */
4963 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4964 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4965 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4966
4967 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4968 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4969 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4970 }
4971
4972
3991 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); 4973 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3992 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); 4974 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3993 4975
4976 if (CHIP_IS_E2(bp)) {
4977 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4978 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4979 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4980
4981 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4982
4983 /* let the HW do it's magic ... */
4984 do {
4985 msleep(200);
4986 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4987 } while (factor-- && (val != 1));
4988
4989 if (val != 1) {
4990 BNX2X_ERR("ATC_INIT failed\n");
4991 return -EBUSY;
4992 }
4993 }
4994
3994 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); 4995 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3995 4996
3996 /* clean the DMAE memory */ 4997 /* clean the DMAE memory */
@@ -4009,20 +5010,12 @@ static int bnx2x_init_common(struct bnx2x *bp)
4009 5010
4010 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 5011 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
4011 5012
4012#ifdef BCM_CNIC 5013 if (CHIP_MODE_IS_4_PORT(bp))
4013 wb_write[0] = 0; 5014 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
4014 wb_write[1] = 0; 5015
4015 for (i = 0; i < 64; i++) { 5016 /* QM queues pointers table */
4016 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16)); 5017 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
4017 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4018 5018
4019 if (CHIP_IS_E1H(bp)) {
4020 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4021 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4022 wb_write, 2);
4023 }
4024 }
4025#endif
4026 /* soft reset pulse */ 5019 /* soft reset pulse */
4027 REG_WR(bp, QM_REG_SOFT_RESET, 1); 5020 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4028 REG_WR(bp, QM_REG_SOFT_RESET, 0); 5021 REG_WR(bp, QM_REG_SOFT_RESET, 0);
@@ -4032,21 +5025,35 @@ static int bnx2x_init_common(struct bnx2x *bp)
4032#endif 5025#endif
4033 5026
4034 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); 5027 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4035 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT); 5028 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5029
4036 if (!CHIP_REV_IS_SLOW(bp)) { 5030 if (!CHIP_REV_IS_SLOW(bp)) {
4037 /* enable hw interrupt from doorbell Q */ 5031 /* enable hw interrupt from doorbell Q */
4038 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); 5032 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4039 } 5033 }
4040 5034
4041 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 5035 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5036 if (CHIP_MODE_IS_4_PORT(bp)) {
5037 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5038 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5039 }
5040
4042 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 5041 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4043 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 5042 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4044#ifndef BCM_CNIC 5043#ifndef BCM_CNIC
4045 /* set NIC mode */ 5044 /* set NIC mode */
4046 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5045 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4047#endif 5046#endif
4048 if (CHIP_IS_E1H(bp)) 5047 if (!CHIP_IS_E1(bp))
4049 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5048 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
5049
5050 if (CHIP_IS_E2(bp)) {
5051 /* Bit-map indicating which L2 hdrs may appear after the
5052 basic Ethernet header */
5053 int has_ovlan = IS_MF(bp);
5054 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5055 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5056 }
4050 5057
4051 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); 5058 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4052 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); 5059 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
@@ -4063,6 +5070,9 @@ static int bnx2x_init_common(struct bnx2x *bp)
4063 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); 5070 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4064 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); 5071 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4065 5072
5073 if (CHIP_MODE_IS_4_PORT(bp))
5074 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5075
4066 /* sync semi rtc */ 5076 /* sync semi rtc */
4067 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 5077 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4068 0x80000000); 5078 0x80000000);
@@ -4073,9 +5083,16 @@ static int bnx2x_init_common(struct bnx2x *bp)
4073 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); 5083 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4074 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5084 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4075 5085
5086 if (CHIP_IS_E2(bp)) {
5087 int has_ovlan = IS_MF(bp);
5088 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5089 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5090 }
5091
4076 REG_WR(bp, SRC_REG_SOFT_RST, 1); 5092 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4077 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) 5093 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4078 REG_WR(bp, i, random32()); 5094 REG_WR(bp, i, random32());
5095
4079 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 5096 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4080#ifdef BCM_CNIC 5097#ifdef BCM_CNIC
4081 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 5098 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -4110,6 +5127,11 @@ static int bnx2x_init_common(struct bnx2x *bp)
4110 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); 5127 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4111 5128
4112 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); 5129 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5130
5131 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5132 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5133
5134 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
4113 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); 5135 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4114 5136
4115 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); 5137 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
@@ -4117,15 +5139,34 @@ static int bnx2x_init_common(struct bnx2x *bp)
4117 REG_WR(bp, 0x2814, 0xffffffff); 5139 REG_WR(bp, 0x2814, 0xffffffff);
4118 REG_WR(bp, 0x3820, 0xffffffff); 5140 REG_WR(bp, 0x3820, 0xffffffff);
4119 5141
5142 if (CHIP_IS_E2(bp)) {
5143 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5144 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5145 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5146 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5147 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5148 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5149 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5150 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5151 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5152 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5153 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5154 }
5155
4120 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); 5156 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4121 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); 5157 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4122 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); 5158 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4123 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); 5159 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4124 5160
4125 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5161 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4126 if (CHIP_IS_E1H(bp)) { 5162 if (!CHIP_IS_E1(bp)) {
4127 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp)); 5163 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
4128 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp)); 5164 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
5165 }
5166 if (CHIP_IS_E2(bp)) {
5167 /* Bit-map indicating which L2 hdrs may appear after the
5168 basic Ethernet header */
5169 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
4129 } 5170 }
4130 5171
4131 if (CHIP_REV_IS_SLOW(bp)) 5172 if (CHIP_REV_IS_SLOW(bp))
@@ -4149,15 +5190,17 @@ static int bnx2x_init_common(struct bnx2x *bp)
4149 } 5190 }
4150 REG_WR(bp, CFC_REG_DEBUG0, 0); 5191 REG_WR(bp, CFC_REG_DEBUG0, 0);
4151 5192
4152 /* read NIG statistic 5193 if (CHIP_IS_E1(bp)) {
4153 to see if this is our first up since powerup */ 5194 /* read NIG statistic
4154 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2); 5195 to see if this is our first up since powerup */
4155 val = *bnx2x_sp(bp, wb_data[0]); 5196 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5197 val = *bnx2x_sp(bp, wb_data[0]);
4156 5198
4157 /* do internal memory self test */ 5199 /* do internal memory self test */
4158 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) { 5200 if ((val == 0) && bnx2x_int_mem_test(bp)) {
4159 BNX2X_ERR("internal mem self test failed\n"); 5201 BNX2X_ERR("internal mem self test failed\n");
4160 return -EBUSY; 5202 return -EBUSY;
5203 }
4161 } 5204 }
4162 5205
4163 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp, 5206 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
@@ -4174,17 +5217,30 @@ static int bnx2x_init_common(struct bnx2x *bp)
4174 enable_blocks_parity(bp); 5217 enable_blocks_parity(bp);
4175 5218
4176 if (!BP_NOMCP(bp)) { 5219 if (!BP_NOMCP(bp)) {
4177 bnx2x_acquire_phy_lock(bp); 5220 /* In E2 2-PORT mode, same ext phy is used for the two paths */
4178 bnx2x_common_init_phy(bp, bp->common.shmem_base, 5221 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
4179 bp->common.shmem2_base); 5222 CHIP_IS_E1x(bp)) {
4180 bnx2x_release_phy_lock(bp); 5223 u32 shmem_base[2], shmem2_base[2];
5224 shmem_base[0] = bp->common.shmem_base;
5225 shmem2_base[0] = bp->common.shmem2_base;
5226 if (CHIP_IS_E2(bp)) {
5227 shmem_base[1] =
5228 SHMEM2_RD(bp, other_shmem_base_addr);
5229 shmem2_base[1] =
5230 SHMEM2_RD(bp, other_shmem2_base_addr);
5231 }
5232 bnx2x_acquire_phy_lock(bp);
5233 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5234 bp->common.chip_id);
5235 bnx2x_release_phy_lock(bp);
5236 }
4181 } else 5237 } else
4182 BNX2X_ERR("Bootcode is missing - can not initialize link\n"); 5238 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4183 5239
4184 return 0; 5240 return 0;
4185} 5241}
4186 5242
4187static int bnx2x_init_port(struct bnx2x *bp) 5243static int bnx2x_init_hw_port(struct bnx2x *bp)
4188{ 5244{
4189 int port = BP_PORT(bp); 5245 int port = BP_PORT(bp);
4190 int init_stage = port ? PORT1_STAGE : PORT0_STAGE; 5246 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
@@ -4198,14 +5254,23 @@ static int bnx2x_init_port(struct bnx2x *bp)
4198 bnx2x_init_block(bp, PXP_BLOCK, init_stage); 5254 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4199 bnx2x_init_block(bp, PXP2_BLOCK, init_stage); 5255 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4200 5256
5257 /* Timers bug workaround: disables the pf_master bit in pglue at
5258 * common phase, we need to enable it here before any dmae access are
5259 * attempted. Therefore we manually added the enable-master to the
5260 * port phase (it also happens in the function phase)
5261 */
5262 if (CHIP_IS_E2(bp))
5263 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5264
4201 bnx2x_init_block(bp, TCM_BLOCK, init_stage); 5265 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4202 bnx2x_init_block(bp, UCM_BLOCK, init_stage); 5266 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4203 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 5267 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4204 bnx2x_init_block(bp, XCM_BLOCK, init_stage); 5268 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4205 5269
4206#ifdef BCM_CNIC 5270 /* QM cid (connection) count */
4207 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1); 5271 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
4208 5272
5273#ifdef BCM_CNIC
4209 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); 5274 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4210 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 5275 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4211 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 5276 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
@@ -4213,29 +5278,41 @@ static int bnx2x_init_port(struct bnx2x *bp)
4213 5278
4214 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 5279 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4215 5280
4216 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 5281 if (CHIP_MODE_IS_4_PORT(bp))
4217 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) { 5282 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
4218 /* no pause for emulation and FPGA */ 5283
4219 low = 0; 5284 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
4220 high = 513; 5285 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4221 } else { 5286 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
4222 if (IS_E1HMF(bp)) 5287 /* no pause for emulation and FPGA */
4223 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); 5288 low = 0;
4224 else if (bp->dev->mtu > 4096) { 5289 high = 513;
4225 if (bp->flags & ONE_PORT_FLAG) 5290 } else {
4226 low = 160; 5291 if (IS_MF(bp))
4227 else { 5292 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4228 val = bp->dev->mtu; 5293 else if (bp->dev->mtu > 4096) {
4229 /* (24*1024 + val*4)/256 */ 5294 if (bp->flags & ONE_PORT_FLAG)
4230 low = 96 + (val/64) + ((val % 64) ? 1 : 0); 5295 low = 160;
4231 } 5296 else {
4232 } else 5297 val = bp->dev->mtu;
4233 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); 5298 /* (24*1024 + val*4)/256 */
4234 high = low + 56; /* 14*1024/256 */ 5299 low = 96 + (val/64) +
5300 ((val % 64) ? 1 : 0);
5301 }
5302 } else
5303 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5304 high = low + 56; /* 14*1024/256 */
5305 }
5306 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5307 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4235 } 5308 }
4236 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4237 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4238 5309
5310 if (CHIP_MODE_IS_4_PORT(bp)) {
5311 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5312 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5313 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5314 BRB1_REG_MAC_GUARANTIED_0), 40);
5315 }
4239 5316
4240 bnx2x_init_block(bp, PRS_BLOCK, init_stage); 5317 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4241 5318
@@ -4248,24 +5325,28 @@ static int bnx2x_init_port(struct bnx2x *bp)
4248 bnx2x_init_block(bp, USEM_BLOCK, init_stage); 5325 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4249 bnx2x_init_block(bp, CSEM_BLOCK, init_stage); 5326 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4250 bnx2x_init_block(bp, XSEM_BLOCK, init_stage); 5327 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5328 if (CHIP_MODE_IS_4_PORT(bp))
5329 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
4251 5330
4252 bnx2x_init_block(bp, UPB_BLOCK, init_stage); 5331 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4253 bnx2x_init_block(bp, XPB_BLOCK, init_stage); 5332 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4254 5333
4255 bnx2x_init_block(bp, PBF_BLOCK, init_stage); 5334 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4256 5335
4257 /* configure PBF to work without PAUSE mtu 9000 */ 5336 if (!CHIP_IS_E2(bp)) {
4258 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); 5337 /* configure PBF to work without PAUSE mtu 9000 */
5338 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4259 5339
4260 /* update threshold */ 5340 /* update threshold */
4261 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); 5341 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4262 /* update init credit */ 5342 /* update init credit */
4263 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); 5343 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4264 5344
4265 /* probe changes */ 5345 /* probe changes */
4266 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1); 5346 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4267 msleep(5); 5347 udelay(50);
4268 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 5348 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5349 }
4269 5350
4270#ifdef BCM_CNIC 5351#ifdef BCM_CNIC
4271 bnx2x_init_block(bp, SRCH_BLOCK, init_stage); 5352 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
@@ -4279,13 +5360,15 @@ static int bnx2x_init_port(struct bnx2x *bp)
4279 } 5360 }
4280 bnx2x_init_block(bp, HC_BLOCK, init_stage); 5361 bnx2x_init_block(bp, HC_BLOCK, init_stage);
4281 5362
5363 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5364
4282 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); 5365 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4283 /* init aeu_mask_attn_func_0/1: 5366 /* init aeu_mask_attn_func_0/1:
4284 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 5367 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4285 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 5368 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4286 * bits 4-7 are used for "per vn group attention" */ 5369 * bits 4-7 are used for "per vn group attention" */
4287 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 5370 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4288 (IS_E1HMF(bp) ? 0xF7 : 0x7)); 5371 (IS_MF(bp) ? 0xF7 : 0x7));
4289 5372
4290 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); 5373 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4291 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); 5374 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
@@ -4297,11 +5380,25 @@ static int bnx2x_init_port(struct bnx2x *bp)
4297 5380
4298 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); 5381 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4299 5382
4300 if (CHIP_IS_E1H(bp)) { 5383 if (!CHIP_IS_E1(bp)) {
4301 /* 0x2 disable e1hov, 0x1 enable */ 5384 /* 0x2 disable mf_ov, 0x1 enable */
4302 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5385 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4303 (IS_E1HMF(bp) ? 0x1 : 0x2)); 5386 (IS_MF(bp) ? 0x1 : 0x2));
4304 5387
5388 if (CHIP_IS_E2(bp)) {
5389 val = 0;
5390 switch (bp->mf_mode) {
5391 case MULTI_FUNCTION_SD:
5392 val = 1;
5393 break;
5394 case MULTI_FUNCTION_SI:
5395 val = 2;
5396 break;
5397 }
5398
5399 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5400 NIG_REG_LLH0_CLS_TYPE), val);
5401 }
4305 { 5402 {
4306 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0); 5403 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4307 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); 5404 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
@@ -4327,140 +5424,323 @@ static int bnx2x_init_port(struct bnx2x *bp)
4327 return 0; 5424 return 0;
4328} 5425}
4329 5426
4330#define ILT_PER_FUNC (768/2)
4331#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4332/* the phys address is shifted right 12 bits and has an added
4333 1=valid bit added to the 53rd bit
4334 then since this is a wide register(TM)
4335 we split it into two 32 bit writes
4336 */
4337#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4338#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4339#define PXP_ONE_ILT(x) (((x) << 10) | x)
4340#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4341
4342#ifdef BCM_CNIC
4343#define CNIC_ILT_LINES 127
4344#define CNIC_CTX_PER_ILT 16
4345#else
4346#define CNIC_ILT_LINES 0
4347#endif
4348
4349static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 5427static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4350{ 5428{
4351 int reg; 5429 int reg;
4352 5430
4353 if (CHIP_IS_E1H(bp)) 5431 if (CHIP_IS_E1(bp))
4354 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4355 else /* E1 */
4356 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 5432 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5433 else
5434 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4357 5435
4358 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 5436 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4359} 5437}
4360 5438
4361static int bnx2x_init_func(struct bnx2x *bp) 5439static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5440{
5441 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5442}
5443
5444static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5445{
5446 u32 i, base = FUNC_ILT_BASE(func);
5447 for (i = base; i < base + ILT_PER_FUNC; i++)
5448 bnx2x_ilt_wr(bp, i, 0);
5449}
5450
5451static int bnx2x_init_hw_func(struct bnx2x *bp)
4362{ 5452{
4363 int port = BP_PORT(bp); 5453 int port = BP_PORT(bp);
4364 int func = BP_FUNC(bp); 5454 int func = BP_FUNC(bp);
5455 struct bnx2x_ilt *ilt = BP_ILT(bp);
5456 u16 cdu_ilt_start;
4365 u32 addr, val; 5457 u32 addr, val;
4366 int i; 5458 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5459 int i, main_mem_width;
4367 5460
4368 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); 5461 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
4369 5462
4370 /* set MSI reconfigure capability */ 5463 /* set MSI reconfigure capability */
4371 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 5464 if (bp->common.int_block == INT_BLOCK_HC) {
4372 val = REG_RD(bp, addr); 5465 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4373 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; 5466 val = REG_RD(bp, addr);
4374 REG_WR(bp, addr, val); 5467 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5468 REG_WR(bp, addr, val);
5469 }
4375 5470
4376 i = FUNC_ILT_BASE(func); 5471 ilt = BP_ILT(bp);
5472 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
4377 5473
4378 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context)); 5474 for (i = 0; i < L2_ILT_LINES(bp); i++) {
4379 if (CHIP_IS_E1H(bp)) { 5475 ilt->lines[cdu_ilt_start + i].page =
4380 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i); 5476 bp->context.vcxt + (ILT_PAGE_CIDS * i);
4381 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES); 5477 ilt->lines[cdu_ilt_start + i].page_mapping =
4382 } else /* E1 */ 5478 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
4383 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, 5479 /* cdu ilt pages are allocated manually so there's no need to
4384 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); 5480 set the size */
5481 }
5482 bnx2x_ilt_init_op(bp, INITOP_SET);
4385 5483
4386#ifdef BCM_CNIC 5484#ifdef BCM_CNIC
4387 i += 1 + CNIC_ILT_LINES; 5485 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
4388 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4389 if (CHIP_IS_E1(bp))
4390 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4391 else {
4392 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4393 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4394 }
4395 5486
4396 i++; 5487 /* T1 hash bits value determines the T1 number of entries */
4397 bnx2x_ilt_wr(bp, i, bp->qm_mapping); 5488 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
4398 if (CHIP_IS_E1(bp)) 5489#endif
4399 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i)); 5490
4400 else { 5491#ifndef BCM_CNIC
4401 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i); 5492 /* set NIC mode */
4402 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i); 5493 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5494#endif /* BCM_CNIC */
5495
5496 if (CHIP_IS_E2(bp)) {
5497 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5498
5499 /* Turn on a single ISR mode in IGU if driver is going to use
5500 * INT#x or MSI
5501 */
5502 if (!(bp->flags & USING_MSIX_FLAG))
5503 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5504 /*
5505 * Timers workaround bug: function init part.
5506 * Need to wait 20msec after initializing ILT,
5507 * needed to make sure there are no requests in
5508 * one of the PXP internal queues with "old" ILT addresses
5509 */
5510 msleep(20);
5511 /*
5512 * Master enable - Due to WB DMAE writes performed before this
5513 * register is re-initialized as part of the regular function
5514 * init
5515 */
5516 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5517 /* Enable the function in IGU */
5518 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
4403 } 5519 }
4404 5520
4405 i++; 5521 bp->dmae_ready = 1;
4406 bnx2x_ilt_wr(bp, i, bp->t1_mapping); 5522
4407 if (CHIP_IS_E1(bp)) 5523 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4408 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i)); 5524
4409 else { 5525 if (CHIP_IS_E2(bp))
4410 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i); 5526 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
4411 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i); 5527
5528 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5529 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5530 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5531 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5532 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5533 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5534 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5535 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5536 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5537
5538 if (CHIP_IS_E2(bp)) {
5539 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5540 BP_PATH(bp));
5541 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5542 BP_PATH(bp));
4412 } 5543 }
4413 5544
4414 /* tell the searcher where the T2 table is */ 5545 if (CHIP_MODE_IS_4_PORT(bp))
4415 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64); 5546 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
4416 5547
4417 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16, 5548 if (CHIP_IS_E2(bp))
4418 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping)); 5549 REG_WR(bp, QM_REG_PF_EN, 1);
4419 5550
4420 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16, 5551 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
4421 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4422 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4423 5552
4424 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10); 5553 if (CHIP_MODE_IS_4_PORT(bp))
4425#endif 5554 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5555
5556 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5557 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5558 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5559 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5560 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5561 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5562 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5563 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5564 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5565 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5566 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
5567 if (CHIP_IS_E2(bp))
5568 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
4426 5569
4427 if (CHIP_IS_E1H(bp)) { 5570 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4428 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4429 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4430 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4431 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4432 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4433 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4434 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4435 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4436 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4437 5571
5572 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5573
5574 if (CHIP_IS_E2(bp))
5575 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5576
5577 if (IS_MF(bp)) {
4438 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 5578 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4439 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov); 5579 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
4440 } 5580 }
4441 5581
5582 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5583
4442 /* HC init per function */ 5584 /* HC init per function */
4443 if (CHIP_IS_E1H(bp)) { 5585 if (bp->common.int_block == INT_BLOCK_HC) {
5586 if (CHIP_IS_E1H(bp)) {
5587 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5588
5589 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5590 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5591 }
5592 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5593
5594 } else {
5595 int num_segs, sb_idx, prod_offset;
5596
4444 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 5597 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4445 5598
4446 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 5599 if (CHIP_IS_E2(bp)) {
4447 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 5600 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5601 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5602 }
5603
5604 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5605
5606 if (CHIP_IS_E2(bp)) {
5607 int dsb_idx = 0;
5608 /**
5609 * Producer memory:
5610 * E2 mode: address 0-135 match to the mapping memory;
5611 * 136 - PF0 default prod; 137 - PF1 default prod;
5612 * 138 - PF2 default prod; 139 - PF3 default prod;
5613 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5614 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5615 * 144-147 reserved.
5616 *
5617 * E1.5 mode - In backward compatible mode;
5618 * for non default SB; each even line in the memory
5619 * holds the U producer and each odd line hold
5620 * the C producer. The first 128 producers are for
5621 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5622 * producers are for the DSB for each PF.
5623 * Each PF has five segments: (the order inside each
5624 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5625 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5626 * 144-147 attn prods;
5627 */
5628 /* non-default-status-blocks */
5629 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5630 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5631 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5632 prod_offset = (bp->igu_base_sb + sb_idx) *
5633 num_segs;
5634
5635 for (i = 0; i < num_segs; i++) {
5636 addr = IGU_REG_PROD_CONS_MEMORY +
5637 (prod_offset + i) * 4;
5638 REG_WR(bp, addr, 0);
5639 }
5640 /* send consumer update with value 0 */
5641 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5642 USTORM_ID, 0, IGU_INT_NOP, 1);
5643 bnx2x_igu_clear_sb(bp,
5644 bp->igu_base_sb + sb_idx);
5645 }
5646
5647 /* default-status-blocks */
5648 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5649 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5650
5651 if (CHIP_MODE_IS_4_PORT(bp))
5652 dsb_idx = BP_FUNC(bp);
5653 else
5654 dsb_idx = BP_E1HVN(bp);
5655
5656 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5657 IGU_BC_BASE_DSB_PROD + dsb_idx :
5658 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5659
5660 for (i = 0; i < (num_segs * E1HVN_MAX);
5661 i += E1HVN_MAX) {
5662 addr = IGU_REG_PROD_CONS_MEMORY +
5663 (prod_offset + i)*4;
5664 REG_WR(bp, addr, 0);
5665 }
5666 /* send consumer update with 0 */
5667 if (CHIP_INT_MODE_IS_BC(bp)) {
5668 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5669 USTORM_ID, 0, IGU_INT_NOP, 1);
5670 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5671 CSTORM_ID, 0, IGU_INT_NOP, 1);
5672 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5673 XSTORM_ID, 0, IGU_INT_NOP, 1);
5674 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5675 TSTORM_ID, 0, IGU_INT_NOP, 1);
5676 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5677 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5678 } else {
5679 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5680 USTORM_ID, 0, IGU_INT_NOP, 1);
5681 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5682 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5683 }
5684 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5685
5686 /* !!! these should become driver const once
5687 rf-tool supports split-68 const */
5688 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5689 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5690 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5691 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5692 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5693 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5694 }
4448 } 5695 }
4449 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4450 5696
4451 /* Reset PCIE errors for debug */ 5697 /* Reset PCIE errors for debug */
4452 REG_WR(bp, 0x2114, 0xffffffff); 5698 REG_WR(bp, 0x2114, 0xffffffff);
4453 REG_WR(bp, 0x2120, 0xffffffff); 5699 REG_WR(bp, 0x2120, 0xffffffff);
5700
5701 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5702 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5703 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5704 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5705 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5706 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5707
5708 if (CHIP_IS_E1x(bp)) {
5709 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5710 main_mem_base = HC_REG_MAIN_MEMORY +
5711 BP_PORT(bp) * (main_mem_size * 4);
5712 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5713 main_mem_width = 8;
5714
5715 val = REG_RD(bp, main_mem_prty_clr);
5716 if (val)
5717 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5718 "block during "
5719 "function init (0x%x)!\n", val);
5720
5721 /* Clear "false" parity errors in MSI-X table */
5722 for (i = main_mem_base;
5723 i < main_mem_base + main_mem_size * 4;
5724 i += main_mem_width) {
5725 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5726 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5727 i, main_mem_width / 4);
5728 }
5729 /* Clear HC parity attention */
5730 REG_RD(bp, main_mem_prty_clr);
5731 }
5732
4454 bnx2x_phy_probe(&bp->link_params); 5733 bnx2x_phy_probe(&bp->link_params);
5734
4455 return 0; 5735 return 0;
4456} 5736}
4457 5737
4458int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 5738int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4459{ 5739{
4460 int i, rc = 0; 5740 int rc = 0;
4461 5741
4462 DP(BNX2X_MSG_MCP, "function %d load_code %x\n", 5742 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4463 BP_FUNC(bp), load_code); 5743 BP_ABS_FUNC(bp), load_code);
4464 5744
4465 bp->dmae_ready = 0; 5745 bp->dmae_ready = 0;
4466 mutex_init(&bp->dmae_mutex); 5746 mutex_init(&bp->dmae_mutex);
@@ -4470,21 +5750,20 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4470 5750
4471 switch (load_code) { 5751 switch (load_code) {
4472 case FW_MSG_CODE_DRV_LOAD_COMMON: 5752 case FW_MSG_CODE_DRV_LOAD_COMMON:
4473 rc = bnx2x_init_common(bp); 5753 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5754 rc = bnx2x_init_hw_common(bp, load_code);
4474 if (rc) 5755 if (rc)
4475 goto init_hw_err; 5756 goto init_hw_err;
4476 /* no break */ 5757 /* no break */
4477 5758
4478 case FW_MSG_CODE_DRV_LOAD_PORT: 5759 case FW_MSG_CODE_DRV_LOAD_PORT:
4479 bp->dmae_ready = 1; 5760 rc = bnx2x_init_hw_port(bp);
4480 rc = bnx2x_init_port(bp);
4481 if (rc) 5761 if (rc)
4482 goto init_hw_err; 5762 goto init_hw_err;
4483 /* no break */ 5763 /* no break */
4484 5764
4485 case FW_MSG_CODE_DRV_LOAD_FUNCTION: 5765 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4486 bp->dmae_ready = 1; 5766 rc = bnx2x_init_hw_func(bp);
4487 rc = bnx2x_init_func(bp);
4488 if (rc) 5767 if (rc)
4489 goto init_hw_err; 5768 goto init_hw_err;
4490 break; 5769 break;
@@ -4495,22 +5774,14 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4495 } 5774 }
4496 5775
4497 if (!BP_NOMCP(bp)) { 5776 if (!BP_NOMCP(bp)) {
4498 int func = BP_FUNC(bp); 5777 int mb_idx = BP_FW_MB_IDX(bp);
4499 5778
4500 bp->fw_drv_pulse_wr_seq = 5779 bp->fw_drv_pulse_wr_seq =
4501 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) & 5780 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
4502 DRV_PULSE_SEQ_MASK); 5781 DRV_PULSE_SEQ_MASK);
4503 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 5782 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4504 } 5783 }
4505 5784
4506 /* this needs to be done before gunzip end */
4507 bnx2x_zero_def_sb(bp);
4508 for_each_queue(bp, i)
4509 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4510#ifdef BCM_CNIC
4511 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4512#endif
4513
4514init_hw_err: 5785init_hw_err:
4515 bnx2x_gunzip_end(bp); 5786 bnx2x_gunzip_end(bp);
4516 5787
@@ -4523,7 +5794,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
4523#define BNX2X_PCI_FREE(x, y, size) \ 5794#define BNX2X_PCI_FREE(x, y, size) \
4524 do { \ 5795 do { \
4525 if (x) { \ 5796 if (x) { \
4526 dma_free_coherent(&bp->pdev->dev, size, x, y); \ 5797 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
4527 x = NULL; \ 5798 x = NULL; \
4528 y = 0; \ 5799 y = 0; \
4529 } \ 5800 } \
@@ -4532,7 +5803,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
4532#define BNX2X_FREE(x) \ 5803#define BNX2X_FREE(x) \
4533 do { \ 5804 do { \
4534 if (x) { \ 5805 if (x) { \
4535 vfree(x); \ 5806 kfree((void *)x); \
4536 x = NULL; \ 5807 x = NULL; \
4537 } \ 5808 } \
4538 } while (0) 5809 } while (0)
@@ -4542,11 +5813,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
4542 /* fastpath */ 5813 /* fastpath */
4543 /* Common */ 5814 /* Common */
4544 for_each_queue(bp, i) { 5815 for_each_queue(bp, i) {
4545
4546 /* status blocks */ 5816 /* status blocks */
4547 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk), 5817 if (CHIP_IS_E2(bp))
4548 bnx2x_fp(bp, i, status_blk_mapping), 5818 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
4549 sizeof(struct host_status_block)); 5819 bnx2x_fp(bp, i, status_blk_mapping),
5820 sizeof(struct host_hc_status_block_e2));
5821 else
5822 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5823 bnx2x_fp(bp, i, status_blk_mapping),
5824 sizeof(struct host_hc_status_block_e1x));
4550 } 5825 }
4551 /* Rx */ 5826 /* Rx */
4552 for_each_queue(bp, i) { 5827 for_each_queue(bp, i) {
@@ -4580,28 +5855,56 @@ void bnx2x_free_mem(struct bnx2x *bp)
4580 /* end of fastpath */ 5855 /* end of fastpath */
4581 5856
4582 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, 5857 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4583 sizeof(struct host_def_status_block)); 5858 sizeof(struct host_sp_status_block));
4584 5859
4585 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 5860 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4586 sizeof(struct bnx2x_slowpath)); 5861 sizeof(struct bnx2x_slowpath));
4587 5862
5863 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5864 bp->context.size);
5865
5866 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5867
5868 BNX2X_FREE(bp->ilt->lines);
5869
4588#ifdef BCM_CNIC 5870#ifdef BCM_CNIC
4589 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); 5871 if (CHIP_IS_E2(bp))
4590 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); 5872 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
4591 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); 5873 sizeof(struct host_hc_status_block_e2));
4592 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); 5874 else
4593 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping, 5875 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
4594 sizeof(struct host_status_block)); 5876 sizeof(struct host_hc_status_block_e1x));
5877
5878 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
4595#endif 5879#endif
5880
4596 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 5881 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4597 5882
5883 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5884 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5885
4598#undef BNX2X_PCI_FREE 5886#undef BNX2X_PCI_FREE
4599#undef BNX2X_KFREE 5887#undef BNX2X_KFREE
4600} 5888}
4601 5889
4602int bnx2x_alloc_mem(struct bnx2x *bp) 5890static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
4603{ 5891{
5892 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5893 if (CHIP_IS_E2(bp)) {
5894 bnx2x_fp(bp, index, sb_index_values) =
5895 (__le16 *)status_blk.e2_sb->sb.index_values;
5896 bnx2x_fp(bp, index, sb_running_index) =
5897 (__le16 *)status_blk.e2_sb->sb.running_index;
5898 } else {
5899 bnx2x_fp(bp, index, sb_index_values) =
5900 (__le16 *)status_blk.e1x_sb->sb.index_values;
5901 bnx2x_fp(bp, index, sb_running_index) =
5902 (__le16 *)status_blk.e1x_sb->sb.running_index;
5903 }
5904}
4604 5905
5906int bnx2x_alloc_mem(struct bnx2x *bp)
5907{
4605#define BNX2X_PCI_ALLOC(x, y, size) \ 5908#define BNX2X_PCI_ALLOC(x, y, size) \
4606 do { \ 5909 do { \
4607 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 5910 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
@@ -4612,10 +5915,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
4612 5915
4613#define BNX2X_ALLOC(x, size) \ 5916#define BNX2X_ALLOC(x, size) \
4614 do { \ 5917 do { \
4615 x = vmalloc(size); \ 5918 x = kzalloc(size, GFP_KERNEL); \
4616 if (x == NULL) \ 5919 if (x == NULL) \
4617 goto alloc_mem_err; \ 5920 goto alloc_mem_err; \
4618 memset(x, 0, size); \
4619 } while (0) 5921 } while (0)
4620 5922
4621 int i; 5923 int i;
@@ -4623,12 +5925,19 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
4623 /* fastpath */ 5925 /* fastpath */
4624 /* Common */ 5926 /* Common */
4625 for_each_queue(bp, i) { 5927 for_each_queue(bp, i) {
5928 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
4626 bnx2x_fp(bp, i, bp) = bp; 5929 bnx2x_fp(bp, i, bp) = bp;
4627
4628 /* status blocks */ 5930 /* status blocks */
4629 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk), 5931 if (CHIP_IS_E2(bp))
5932 BNX2X_PCI_ALLOC(sb->e2_sb,
5933 &bnx2x_fp(bp, i, status_blk_mapping),
5934 sizeof(struct host_hc_status_block_e2));
5935 else
5936 BNX2X_PCI_ALLOC(sb->e1x_sb,
4630 &bnx2x_fp(bp, i, status_blk_mapping), 5937 &bnx2x_fp(bp, i, status_blk_mapping),
4631 sizeof(struct host_status_block)); 5938 sizeof(struct host_hc_status_block_e1x));
5939
5940 set_sb_shortcuts(bp, i);
4632 } 5941 }
4633 /* Rx */ 5942 /* Rx */
4634 for_each_queue(bp, i) { 5943 for_each_queue(bp, i) {
@@ -4664,37 +5973,41 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
4664 } 5973 }
4665 /* end of fastpath */ 5974 /* end of fastpath */
4666 5975
5976#ifdef BCM_CNIC
5977 if (CHIP_IS_E2(bp))
5978 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5979 sizeof(struct host_hc_status_block_e2));
5980 else
5981 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5982 sizeof(struct host_hc_status_block_e1x));
5983
5984 /* allocate searcher T2 table */
5985 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5986#endif
5987
5988
4667 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 5989 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4668 sizeof(struct host_def_status_block)); 5990 sizeof(struct host_sp_status_block));
4669 5991
4670 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 5992 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4671 sizeof(struct bnx2x_slowpath)); 5993 sizeof(struct bnx2x_slowpath));
4672 5994
4673#ifdef BCM_CNIC 5995 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
4674 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4675
4676 /* allocate searcher T2 table
4677 we allocate 1/4 of alloc num for T2
4678 (which is not entered into the ILT) */
4679 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4680
4681 /* Initialize T2 (for 1024 connections) */
4682 for (i = 0; i < 16*1024; i += 64)
4683 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4684 5996
4685 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */ 5997 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
4686 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); 5998 bp->context.size);
4687 5999
4688 /* QM queues (128*MAX_CONN) */ 6000 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
4689 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4690 6001
4691 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping, 6002 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
4692 sizeof(struct host_status_block)); 6003 goto alloc_mem_err;
4693#endif
4694 6004
4695 /* Slow path ring */ 6005 /* Slow path ring */
4696 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 6006 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4697 6007
6008 /* EQ */
6009 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6010 BCM_PAGE_SIZE * NUM_EQ_PAGES);
4698 return 0; 6011 return 0;
4699 6012
4700alloc_mem_err: 6013alloc_mem_err:
@@ -4705,97 +6018,47 @@ alloc_mem_err:
4705#undef BNX2X_ALLOC 6018#undef BNX2X_ALLOC
4706} 6019}
4707 6020
4708
4709/* 6021/*
4710 * Init service functions 6022 * Init service functions
4711 */ 6023 */
4712 6024int bnx2x_func_start(struct bnx2x *bp)
4713/**
4714 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4715 *
4716 * @param bp driver descriptor
4717 * @param set set or clear an entry (1 or 0)
4718 * @param mac pointer to a buffer containing a MAC
4719 * @param cl_bit_vec bit vector of clients to register a MAC for
4720 * @param cam_offset offset in a CAM to use
4721 * @param with_bcast set broadcast MAC as well
4722 */
4723static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4724 u32 cl_bit_vec, u8 cam_offset,
4725 u8 with_bcast)
4726{ 6025{
4727 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6026 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
4728 int port = BP_PORT(bp);
4729 6027
4730 /* CAM allocation 6028 /* Wait for completion */
4731 * unicasts 0-31:port0 32-63:port1 6029 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
4732 * multicast 64-127:port0 128-191:port1 6030 WAIT_RAMROD_COMMON);
4733 */ 6031}
4734 config->hdr.length = 1 + (with_bcast ? 1 : 0);
4735 config->hdr.offset = cam_offset;
4736 config->hdr.client_id = 0xff;
4737 config->hdr.reserved1 = 0;
4738
4739 /* primary MAC */
4740 config->config_table[0].cam_entry.msb_mac_addr =
4741 swab16(*(u16 *)&mac[0]);
4742 config->config_table[0].cam_entry.middle_mac_addr =
4743 swab16(*(u16 *)&mac[2]);
4744 config->config_table[0].cam_entry.lsb_mac_addr =
4745 swab16(*(u16 *)&mac[4]);
4746 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4747 if (set)
4748 config->config_table[0].target_table_entry.flags = 0;
4749 else
4750 CAM_INVALIDATE(config->config_table[0]);
4751 config->config_table[0].target_table_entry.clients_bit_vector =
4752 cpu_to_le32(cl_bit_vec);
4753 config->config_table[0].target_table_entry.vlan_id = 0;
4754 6032
4755 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 6033int bnx2x_func_stop(struct bnx2x *bp)
4756 (set ? "setting" : "clearing"), 6034{
4757 config->config_table[0].cam_entry.msb_mac_addr, 6035 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
4758 config->config_table[0].cam_entry.middle_mac_addr,
4759 config->config_table[0].cam_entry.lsb_mac_addr);
4760
4761 /* broadcast */
4762 if (with_bcast) {
4763 config->config_table[1].cam_entry.msb_mac_addr =
4764 cpu_to_le16(0xffff);
4765 config->config_table[1].cam_entry.middle_mac_addr =
4766 cpu_to_le16(0xffff);
4767 config->config_table[1].cam_entry.lsb_mac_addr =
4768 cpu_to_le16(0xffff);
4769 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4770 if (set)
4771 config->config_table[1].target_table_entry.flags =
4772 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4773 else
4774 CAM_INVALIDATE(config->config_table[1]);
4775 config->config_table[1].target_table_entry.clients_bit_vector =
4776 cpu_to_le32(cl_bit_vec);
4777 config->config_table[1].target_table_entry.vlan_id = 0;
4778 }
4779 6036
4780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 6037 /* Wait for completion */
4781 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6038 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
4782 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6039 0, &(bp->state), WAIT_RAMROD_COMMON);
4783} 6040}
4784 6041
4785/** 6042/**
4786 * Sets a MAC in a CAM for a few L2 Clients for E1H chip 6043 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
4787 * 6044 *
4788 * @param bp driver descriptor 6045 * @param bp driver descriptor
4789 * @param set set or clear an entry (1 or 0) 6046 * @param set set or clear an entry (1 or 0)
4790 * @param mac pointer to a buffer containing a MAC 6047 * @param mac pointer to a buffer containing a MAC
4791 * @param cl_bit_vec bit vector of clients to register a MAC for 6048 * @param cl_bit_vec bit vector of clients to register a MAC for
4792 * @param cam_offset offset in a CAM to use 6049 * @param cam_offset offset in a CAM to use
6050 * @param is_bcast is the set MAC a broadcast address (for E1 only)
4793 */ 6051 */
4794static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac, 6052static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
4795 u32 cl_bit_vec, u8 cam_offset) 6053 u32 cl_bit_vec, u8 cam_offset,
6054 u8 is_bcast)
4796{ 6055{
4797 struct mac_configuration_cmd_e1h *config = 6056 struct mac_configuration_cmd *config =
4798 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6057 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6058 int ramrod_flags = WAIT_RAMROD_COMMON;
6059
6060 bp->set_mac_pending = 1;
6061 smp_wmb();
4799 6062
4800 config->hdr.length = 1; 6063 config->hdr.length = 1;
4801 config->hdr.offset = cam_offset; 6064 config->hdr.offset = cam_offset;
@@ -4812,29 +6075,41 @@ static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4812 config->config_table[0].clients_bit_vector = 6075 config->config_table[0].clients_bit_vector =
4813 cpu_to_le32(cl_bit_vec); 6076 cpu_to_le32(cl_bit_vec);
4814 config->config_table[0].vlan_id = 0; 6077 config->config_table[0].vlan_id = 0;
4815 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6078 config->config_table[0].pf_id = BP_FUNC(bp);
4816 if (set) 6079 if (set)
4817 config->config_table[0].flags = BP_PORT(bp); 6080 SET_FLAG(config->config_table[0].flags,
6081 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6082 T_ETH_MAC_COMMAND_SET);
4818 else 6083 else
4819 config->config_table[0].flags = 6084 SET_FLAG(config->config_table[0].flags,
4820 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; 6085 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6086 T_ETH_MAC_COMMAND_INVALIDATE);
4821 6087
4822 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n", 6088 if (is_bcast)
6089 SET_FLAG(config->config_table[0].flags,
6090 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6091
6092 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
4823 (set ? "setting" : "clearing"), 6093 (set ? "setting" : "clearing"),
4824 config->config_table[0].msb_mac_addr, 6094 config->config_table[0].msb_mac_addr,
4825 config->config_table[0].middle_mac_addr, 6095 config->config_table[0].middle_mac_addr,
4826 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec); 6096 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
4827 6097
4828 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 6098 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
4829 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 6099 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4830 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6100 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6101
6102 /* Wait for a completion */
6103 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
4831} 6104}
4832 6105
4833static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, 6106int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4834 int *state_p, int poll) 6107 int *state_p, int flags)
4835{ 6108{
4836 /* can take a while if any port is running */ 6109 /* can take a while if any port is running */
4837 int cnt = 5000; 6110 int cnt = 5000;
6111 u8 poll = flags & WAIT_RAMROD_POLL;
6112 u8 common = flags & WAIT_RAMROD_COMMON;
4838 6113
4839 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", 6114 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4840 poll ? "polling" : "waiting", state, idx); 6115 poll ? "polling" : "waiting", state, idx);
@@ -4842,13 +6117,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4842 might_sleep(); 6117 might_sleep();
4843 while (cnt--) { 6118 while (cnt--) {
4844 if (poll) { 6119 if (poll) {
4845 bnx2x_rx_int(bp->fp, 10); 6120 if (common)
4846 /* if index is different from 0 6121 bnx2x_eq_int(bp);
4847 * the reply for some commands will 6122 else {
4848 * be on the non default queue 6123 bnx2x_rx_int(bp->fp, 10);
4849 */ 6124 /* if index is different from 0
4850 if (idx) 6125 * the reply for some commands will
4851 bnx2x_rx_int(&bp->fp[idx], 10); 6126 * be on the non default queue
6127 */
6128 if (idx)
6129 bnx2x_rx_int(&bp->fp[idx], 10);
6130 }
4852 } 6131 }
4853 6132
4854 mb(); /* state is changed by bnx2x_sp_event() */ 6133 mb(); /* state is changed by bnx2x_sp_event() */
@@ -4875,29 +6154,112 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4875 return -EBUSY; 6154 return -EBUSY;
4876} 6155}
4877 6156
4878void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set) 6157u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
4879{ 6158{
4880 bp->set_mac_pending++; 6159 if (CHIP_IS_E1H(bp))
4881 smp_wmb(); 6160 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6161 else if (CHIP_MODE_IS_4_PORT(bp))
6162 return BP_FUNC(bp) * 32 + rel_offset;
6163 else
6164 return BP_VN(bp) * 32 + rel_offset;
6165}
6166
6167void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6168{
6169 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6170 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
4882 6171
4883 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, 6172 /* networking MAC */
4884 (1 << bp->fp->cl_id), BP_FUNC(bp)); 6173 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6174 (1 << bp->fp->cl_id), cam_offset , 0);
4885 6175
4886 /* Wait for a completion */ 6176 if (CHIP_IS_E1(bp)) {
4887 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 6177 /* broadcast MAC */
6178 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6179 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6180 }
4888} 6181}
6182static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6183{
6184 int i = 0, old;
6185 struct net_device *dev = bp->dev;
6186 struct netdev_hw_addr *ha;
6187 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6188 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6189
6190 netdev_for_each_mc_addr(ha, dev) {
6191 /* copy mac */
6192 config_cmd->config_table[i].msb_mac_addr =
6193 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6194 config_cmd->config_table[i].middle_mac_addr =
6195 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6196 config_cmd->config_table[i].lsb_mac_addr =
6197 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6198
6199 config_cmd->config_table[i].vlan_id = 0;
6200 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6201 config_cmd->config_table[i].clients_bit_vector =
6202 cpu_to_le32(1 << BP_L_ID(bp));
6203
6204 SET_FLAG(config_cmd->config_table[i].flags,
6205 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6206 T_ETH_MAC_COMMAND_SET);
4889 6207
4890void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set) 6208 DP(NETIF_MSG_IFUP,
6209 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6210 config_cmd->config_table[i].msb_mac_addr,
6211 config_cmd->config_table[i].middle_mac_addr,
6212 config_cmd->config_table[i].lsb_mac_addr);
6213 i++;
6214 }
6215 old = config_cmd->hdr.length;
6216 if (old > i) {
6217 for (; i < old; i++) {
6218 if (CAM_IS_INVALID(config_cmd->
6219 config_table[i])) {
6220 /* already invalidated */
6221 break;
6222 }
6223 /* invalidate */
6224 SET_FLAG(config_cmd->config_table[i].flags,
6225 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6226 T_ETH_MAC_COMMAND_INVALIDATE);
6227 }
6228 }
6229
6230 config_cmd->hdr.length = i;
6231 config_cmd->hdr.offset = offset;
6232 config_cmd->hdr.client_id = 0xff;
6233 config_cmd->hdr.reserved1 = 0;
6234
6235 bp->set_mac_pending = 1;
6236 smp_wmb();
6237
6238 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6239 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6240}
6241static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
4891{ 6242{
4892 bp->set_mac_pending++; 6243 int i;
6244 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6245 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6246 int ramrod_flags = WAIT_RAMROD_COMMON;
6247
6248 bp->set_mac_pending = 1;
4893 smp_wmb(); 6249 smp_wmb();
4894 6250
4895 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr, 6251 for (i = 0; i < config_cmd->hdr.length; i++)
4896 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0), 6252 SET_FLAG(config_cmd->config_table[i].flags,
4897 1); 6253 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6254 T_ETH_MAC_COMMAND_INVALIDATE);
6255
6256 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6257 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
4898 6258
4899 /* Wait for a completion */ 6259 /* Wait for a completion */
4900 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1); 6260 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6261 ramrod_flags);
6262
4901} 6263}
4902 6264
4903#ifdef BCM_CNIC 6265#ifdef BCM_CNIC
@@ -4913,174 +6275,463 @@ void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4913 */ 6275 */
4914int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) 6276int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4915{ 6277{
4916 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID); 6278 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
4917 6279 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
4918 bp->set_mac_pending++; 6280 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
4919 smp_wmb(); 6281 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
4920 6282
4921 /* Send a SET_MAC ramrod */ 6283 /* Send a SET_MAC ramrod */
4922 if (CHIP_IS_E1(bp)) 6284 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
4923 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac, 6285 cam_offset, 0);
4924 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4925 1);
4926 else
4927 /* CAM allocation for E1H
4928 * unicasts: by func number
4929 * multicast: 20+FUNC*20, 20 each
4930 */
4931 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4932 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4933
4934 /* Wait for a completion when setting */
4935 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4936
4937 return 0; 6286 return 0;
4938} 6287}
4939#endif 6288#endif
4940 6289
4941int bnx2x_setup_leading(struct bnx2x *bp) 6290static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
4942{ 6291 struct bnx2x_client_init_params *params,
4943 int rc; 6292 u8 activate,
6293 struct client_init_ramrod_data *data)
6294{
6295 /* Clear the buffer */
6296 memset(data, 0, sizeof(*data));
6297
6298 /* general */
6299 data->general.client_id = params->rxq_params.cl_id;
6300 data->general.statistics_counter_id = params->rxq_params.stat_id;
6301 data->general.statistics_en_flg =
6302 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6303 data->general.activate_flg = activate;
6304 data->general.sp_client_id = params->rxq_params.spcl_id;
6305
6306 /* Rx data */
6307 data->rx.tpa_en_flg =
6308 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6309 data->rx.vmqueue_mode_en_flg = 0;
6310 data->rx.cache_line_alignment_log_size =
6311 params->rxq_params.cache_line_log;
6312 data->rx.enable_dynamic_hc =
6313 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6314 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6315 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6316 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6317
6318 /* We don't set drop flags */
6319 data->rx.drop_ip_cs_err_flg = 0;
6320 data->rx.drop_tcp_cs_err_flg = 0;
6321 data->rx.drop_ttl0_flg = 0;
6322 data->rx.drop_udp_cs_err_flg = 0;
6323
6324 data->rx.inner_vlan_removal_enable_flg =
6325 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6326 data->rx.outer_vlan_removal_enable_flg =
6327 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6328 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6329 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6330 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6331 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6332 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6333 data->rx.bd_page_base.lo =
6334 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6335 data->rx.bd_page_base.hi =
6336 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6337 data->rx.sge_page_base.lo =
6338 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6339 data->rx.sge_page_base.hi =
6340 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6341 data->rx.cqe_page_base.lo =
6342 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6343 data->rx.cqe_page_base.hi =
6344 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6345 data->rx.is_leading_rss =
6346 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6347 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6348
6349 /* Tx data */
6350 data->tx.enforce_security_flg = 0; /* VF specific */
6351 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6352 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6353 data->tx.mtu = 0; /* VF specific */
6354 data->tx.tx_bd_page_base.lo =
6355 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6356 data->tx.tx_bd_page_base.hi =
6357 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6358
6359 /* flow control data */
6360 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6361 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6362 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6363 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6364 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6365 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6366 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6367
6368 data->fc.safc_group_num = params->txq_params.cos;
6369 data->fc.safc_group_en_flg =
6370 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6371 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6372}
6373
6374static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6375{
6376 /* ustorm cxt validation */
6377 cxt->ustorm_ag_context.cdu_usage =
6378 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6379 ETH_CONNECTION_TYPE);
6380 /* xcontext validation */
6381 cxt->xstorm_ag_context.cdu_reserved =
6382 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6383 ETH_CONNECTION_TYPE);
6384}
6385
6386int bnx2x_setup_fw_client(struct bnx2x *bp,
6387 struct bnx2x_client_init_params *params,
6388 u8 activate,
6389 struct client_init_ramrod_data *data,
6390 dma_addr_t data_mapping)
6391{
6392 u16 hc_usec;
6393 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6394 int ramrod_flags = 0, rc;
6395
6396 /* HC and context validation values */
6397 hc_usec = params->txq_params.hc_rate ?
6398 1000000 / params->txq_params.hc_rate : 0;
6399 bnx2x_update_coalesce_sb_index(bp,
6400 params->txq_params.fw_sb_id,
6401 params->txq_params.sb_cq_index,
6402 !(params->txq_params.flags & QUEUE_FLG_HC),
6403 hc_usec);
6404
6405 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6406
6407 hc_usec = params->rxq_params.hc_rate ?
6408 1000000 / params->rxq_params.hc_rate : 0;
6409 bnx2x_update_coalesce_sb_index(bp,
6410 params->rxq_params.fw_sb_id,
6411 params->rxq_params.sb_cq_index,
6412 !(params->rxq_params.flags & QUEUE_FLG_HC),
6413 hc_usec);
6414
6415 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6416 params->rxq_params.cid);
6417
6418 /* zero stats */
6419 if (params->txq_params.flags & QUEUE_FLG_STATS)
6420 storm_memset_xstats_zero(bp, BP_PORT(bp),
6421 params->txq_params.stat_id);
6422
6423 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6424 storm_memset_ustats_zero(bp, BP_PORT(bp),
6425 params->rxq_params.stat_id);
6426 storm_memset_tstats_zero(bp, BP_PORT(bp),
6427 params->rxq_params.stat_id);
6428 }
6429
6430 /* Fill the ramrod data */
6431 bnx2x_fill_cl_init_data(bp, params, activate, data);
6432
6433 /* SETUP ramrod.
6434 *
6435 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6436 * barrier except from mmiowb() is needed to impose a
6437 * proper ordering of memory operations.
6438 */
6439 mmiowb();
4944 6440
4945 /* reset IGU state */
4946 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4947 6441
4948 /* SETUP ramrod */ 6442 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
4949 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0); 6443 U64_HI(data_mapping), U64_LO(data_mapping), 0);
4950 6444
4951 /* Wait for completion */ 6445 /* Wait for completion */
4952 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0); 6446 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
4953 6447 params->ramrod_params.index,
6448 params->ramrod_params.pstate,
6449 ramrod_flags);
4954 return rc; 6450 return rc;
4955} 6451}
4956 6452
4957int bnx2x_setup_multi(struct bnx2x *bp, int index) 6453/**
6454 * Configure interrupt mode according to current configuration.
6455 * In case of MSI-X it will also try to enable MSI-X.
6456 *
6457 * @param bp
6458 *
6459 * @return int
6460 */
6461static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
4958{ 6462{
4959 struct bnx2x_fastpath *fp = &bp->fp[index]; 6463 int rc = 0;
4960 6464
4961 /* reset IGU state */ 6465 switch (bp->int_mode) {
4962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 6466 case INT_MODE_MSI:
6467 bnx2x_enable_msi(bp);
6468 /* falling through... */
6469 case INT_MODE_INTx:
6470 bp->num_queues = 1;
6471 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6472 break;
6473 default:
6474 /* Set number of queues according to bp->multi_mode value */
6475 bnx2x_set_num_queues(bp);
4963 6476
4964 /* SETUP ramrod */ 6477 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
4965 fp->state = BNX2X_FP_STATE_OPENING; 6478 bp->num_queues);
4966 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4967 fp->cl_id, 0);
4968 6479
4969 /* Wait for completion */ 6480 /* if we can't use MSI-X we only need one fp,
4970 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, 6481 * so try to enable MSI-X with the requested number of fp's
4971 &(fp->state), 0); 6482 * and fallback to MSI or legacy INTx with one fp
4972} 6483 */
6484 rc = bnx2x_enable_msix(bp);
6485 if (rc) {
6486 /* failed to enable MSI-X */
6487 if (bp->multi_mode)
6488 DP(NETIF_MSG_IFUP,
6489 "Multi requested but failed to "
6490 "enable MSI-X (%d), "
6491 "set number of queues to %d\n",
6492 bp->num_queues,
6493 1);
6494 bp->num_queues = 1;
6495
6496 if (!(bp->flags & DISABLE_MSI_FLAG))
6497 bnx2x_enable_msi(bp);
6498 }
4973 6499
6500 break;
6501 }
4974 6502
4975void bnx2x_set_num_queues_msix(struct bnx2x *bp) 6503 return rc;
6504}
6505
6506/* must be called prioir to any HW initializations */
6507static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
4976{ 6508{
6509 return L2_ILT_LINES(bp);
6510}
4977 6511
4978 switch (bp->multi_mode) { 6512void bnx2x_ilt_set_info(struct bnx2x *bp)
4979 case ETH_RSS_MODE_DISABLED: 6513{
4980 bp->num_queues = 1; 6514 struct ilt_client_info *ilt_client;
4981 break; 6515 struct bnx2x_ilt *ilt = BP_ILT(bp);
6516 u16 line = 0;
4982 6517
4983 case ETH_RSS_MODE_REGULAR: 6518 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
4984 if (num_queues) 6519 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
4985 bp->num_queues = min_t(u32, num_queues,
4986 BNX2X_MAX_QUEUES(bp));
4987 else
4988 bp->num_queues = min_t(u32, num_online_cpus(),
4989 BNX2X_MAX_QUEUES(bp));
4990 break;
4991 6520
6521 /* CDU */
6522 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6523 ilt_client->client_num = ILT_CLIENT_CDU;
6524 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6525 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6526 ilt_client->start = line;
6527 line += L2_ILT_LINES(bp);
6528#ifdef BCM_CNIC
6529 line += CNIC_ILT_LINES;
6530#endif
6531 ilt_client->end = line - 1;
6532
6533 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6534 "flags 0x%x, hw psz %d\n",
6535 ilt_client->start,
6536 ilt_client->end,
6537 ilt_client->page_size,
6538 ilt_client->flags,
6539 ilog2(ilt_client->page_size >> 12));
6540
6541 /* QM */
6542 if (QM_INIT(bp->qm_cid_count)) {
6543 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6544 ilt_client->client_num = ILT_CLIENT_QM;
6545 ilt_client->page_size = QM_ILT_PAGE_SZ;
6546 ilt_client->flags = 0;
6547 ilt_client->start = line;
6548
6549 /* 4 bytes for each cid */
6550 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6551 QM_ILT_PAGE_SZ);
6552
6553 ilt_client->end = line - 1;
6554
6555 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6556 "flags 0x%x, hw psz %d\n",
6557 ilt_client->start,
6558 ilt_client->end,
6559 ilt_client->page_size,
6560 ilt_client->flags,
6561 ilog2(ilt_client->page_size >> 12));
6562
6563 }
6564 /* SRC */
6565 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6566#ifdef BCM_CNIC
6567 ilt_client->client_num = ILT_CLIENT_SRC;
6568 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6569 ilt_client->flags = 0;
6570 ilt_client->start = line;
6571 line += SRC_ILT_LINES;
6572 ilt_client->end = line - 1;
6573
6574 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6575 "flags 0x%x, hw psz %d\n",
6576 ilt_client->start,
6577 ilt_client->end,
6578 ilt_client->page_size,
6579 ilt_client->flags,
6580 ilog2(ilt_client->page_size >> 12));
4992 6581
4993 default: 6582#else
4994 bp->num_queues = 1; 6583 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
4995 break; 6584#endif
4996 }
4997}
4998 6585
6586 /* TM */
6587 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6588#ifdef BCM_CNIC
6589 ilt_client->client_num = ILT_CLIENT_TM;
6590 ilt_client->page_size = TM_ILT_PAGE_SZ;
6591 ilt_client->flags = 0;
6592 ilt_client->start = line;
6593 line += TM_ILT_LINES;
6594 ilt_client->end = line - 1;
6595
6596 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6597 "flags 0x%x, hw psz %d\n",
6598 ilt_client->start,
6599 ilt_client->end,
6600 ilt_client->page_size,
6601 ilt_client->flags,
6602 ilog2(ilt_client->page_size >> 12));
4999 6603
6604#else
6605 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6606#endif
6607}
5000 6608
5001static int bnx2x_stop_multi(struct bnx2x *bp, int index) 6609int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6610 int is_leading)
5002{ 6611{
5003 struct bnx2x_fastpath *fp = &bp->fp[index]; 6612 struct bnx2x_client_init_params params = { {0} };
5004 int rc; 6613 int rc;
5005 6614
5006 /* halt the connection */ 6615 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
5007 fp->state = BNX2X_FP_STATE_HALTING; 6616 IGU_INT_ENABLE, 0);
5008 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5009 6617
5010 /* Wait for completion */ 6618 params.ramrod_params.pstate = &fp->state;
5011 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 6619 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5012 &(fp->state), 1); 6620 params.ramrod_params.index = fp->index;
5013 if (rc) /* timeout */ 6621 params.ramrod_params.cid = fp->cid;
5014 return rc;
5015 6622
5016 /* delete cfc entry */ 6623 if (is_leading)
5017 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); 6624 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
5018 6625
5019 /* Wait for completion */ 6626 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
5020 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, 6627
5021 &(fp->state), 1); 6628 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6629
6630 rc = bnx2x_setup_fw_client(bp, &params, 1,
6631 bnx2x_sp(bp, client_init_data),
6632 bnx2x_sp_mapping(bp, client_init_data));
5022 return rc; 6633 return rc;
5023} 6634}
5024 6635
5025static int bnx2x_stop_leading(struct bnx2x *bp) 6636int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
5026{ 6637{
5027 __le16 dsb_sp_prod_idx;
5028 /* if the other port is handling traffic,
5029 this can take a lot of time */
5030 int cnt = 500;
5031 int rc; 6638 int rc;
5032 6639
5033 might_sleep(); 6640 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
5034 6641
5035 /* Send HALT ramrod */ 6642 /* halt the connection */
5036 bp->fp[0].state = BNX2X_FP_STATE_HALTING; 6643 *p->pstate = BNX2X_FP_STATE_HALTING;
5037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0); 6644 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6645 p->cl_id, 0);
5038 6646
5039 /* Wait for completion */ 6647 /* Wait for completion */
5040 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6648 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
5041 &(bp->fp[0].state), 1); 6649 p->pstate, poll_flag);
5042 if (rc) /* timeout */ 6650 if (rc) /* timeout */
5043 return rc; 6651 return rc;
5044 6652
5045 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6653 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6654 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6655 p->cl_id, 0);
6656 /* Wait for completion */
6657 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6658 p->pstate, poll_flag);
6659 if (rc) /* timeout */
6660 return rc;
5046 6661
5047 /* Send PORT_DELETE ramrod */
5048 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5049 6662
5050 /* Wait for completion to arrive on default status block 6663 /* delete cfc entry */
5051 we are going to reset the chip anyway 6664 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
5052 so there is not much to do if this times out
5053 */
5054 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5055 if (!cnt) {
5056 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5057 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5058 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5059#ifdef BNX2X_STOP_ON_ERROR
5060 bnx2x_panic();
5061#endif
5062 rc = -EBUSY;
5063 break;
5064 }
5065 cnt--;
5066 msleep(1);
5067 rmb(); /* Refresh the dsb_sp_prod */
5068 }
5069 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5070 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5071 6665
6666 /* Wait for completion */
6667 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6668 p->pstate, WAIT_RAMROD_COMMON);
5072 return rc; 6669 return rc;
5073} 6670}
5074 6671
6672static int bnx2x_stop_client(struct bnx2x *bp, int index)
6673{
6674 struct bnx2x_client_ramrod_params client_stop = {0};
6675 struct bnx2x_fastpath *fp = &bp->fp[index];
6676
6677 client_stop.index = index;
6678 client_stop.cid = fp->cid;
6679 client_stop.cl_id = fp->cl_id;
6680 client_stop.pstate = &(fp->state);
6681 client_stop.poll = 0;
6682
6683 return bnx2x_stop_fw_client(bp, &client_stop);
6684}
6685
6686
5075static void bnx2x_reset_func(struct bnx2x *bp) 6687static void bnx2x_reset_func(struct bnx2x *bp)
5076{ 6688{
5077 int port = BP_PORT(bp); 6689 int port = BP_PORT(bp);
5078 int func = BP_FUNC(bp); 6690 int func = BP_FUNC(bp);
5079 int base, i; 6691 int i;
6692 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
6693 (CHIP_IS_E2(bp) ?
6694 offsetof(struct hc_status_block_data_e2, common) :
6695 offsetof(struct hc_status_block_data_e1x, common));
6696 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6697 int pfid_offset = offsetof(struct pci_entity, pf_id);
6698
6699 /* Disable the function in the FW */
6700 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6701 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6702 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6703 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6704
6705 /* FP SBs */
6706 for_each_queue(bp, i) {
6707 struct bnx2x_fastpath *fp = &bp->fp[i];
6708 REG_WR8(bp,
6709 BAR_CSTRORM_INTMEM +
6710 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6711 + pfunc_offset_fp + pfid_offset,
6712 HC_FUNCTION_DISABLED);
6713 }
6714
6715 /* SP SB */
6716 REG_WR8(bp,
6717 BAR_CSTRORM_INTMEM +
6718 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6719 pfunc_offset_sp + pfid_offset,
6720 HC_FUNCTION_DISABLED);
6721
6722
6723 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6724 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6725 0);
5080 6726
5081 /* Configure IGU */ 6727 /* Configure IGU */
5082 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 6728 if (bp->common.int_block == INT_BLOCK_HC) {
5083 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 6729 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6730 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6731 } else {
6732 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6733 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6734 }
5084 6735
5085#ifdef BCM_CNIC 6736#ifdef BCM_CNIC
5086 /* Disable Timer scan */ 6737 /* Disable Timer scan */
@@ -5096,9 +6747,27 @@ static void bnx2x_reset_func(struct bnx2x *bp)
5096 } 6747 }
5097#endif 6748#endif
5098 /* Clear ILT */ 6749 /* Clear ILT */
5099 base = FUNC_ILT_BASE(func); 6750 bnx2x_clear_func_ilt(bp, func);
5100 for (i = base; i < base + ILT_PER_FUNC; i++) 6751
5101 bnx2x_ilt_wr(bp, i, 0); 6752 /* Timers workaround bug for E2: if this is vnic-3,
6753 * we need to set the entire ilt range for this timers.
6754 */
6755 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6756 struct ilt_client_info ilt_cli;
6757 /* use dummy TM client */
6758 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6759 ilt_cli.start = 0;
6760 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6761 ilt_cli.client_num = ILT_CLIENT_TM;
6762
6763 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6764 }
6765
6766 /* this assumes that reset_port() called before reset_func()*/
6767 if (CHIP_IS_E2(bp))
6768 bnx2x_pf_disable(bp);
6769
6770 bp->dmae_ready = 0;
5102} 6771}
5103 6772
5104static void bnx2x_reset_port(struct bnx2x *bp) 6773static void bnx2x_reset_port(struct bnx2x *bp)
@@ -5130,7 +6799,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
5130static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6799static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5131{ 6800{
5132 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6801 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5133 BP_FUNC(bp), reset_code); 6802 BP_ABS_FUNC(bp), reset_code);
5134 6803
5135 switch (reset_code) { 6804 switch (reset_code) {
5136 case FW_MSG_CODE_DRV_UNLOAD_COMMON: 6805 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
@@ -5167,7 +6836,6 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5167 cnt = 1000; 6836 cnt = 1000;
5168 while (bnx2x_has_tx_work_unload(fp)) { 6837 while (bnx2x_has_tx_work_unload(fp)) {
5169 6838
5170 bnx2x_tx_int(fp);
5171 if (!cnt) { 6839 if (!cnt) {
5172 BNX2X_ERR("timeout waiting for queue[%d]\n", 6840 BNX2X_ERR("timeout waiting for queue[%d]\n",
5173 i); 6841 i);
@@ -5186,39 +6854,21 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5186 msleep(1); 6854 msleep(1);
5187 6855
5188 if (CHIP_IS_E1(bp)) { 6856 if (CHIP_IS_E1(bp)) {
5189 struct mac_configuration_cmd *config = 6857 /* invalidate mc list,
5190 bnx2x_sp(bp, mcast_config); 6858 * wait and poll (interrupts are off)
5191 6859 */
5192 bnx2x_set_eth_mac_addr_e1(bp, 0); 6860 bnx2x_invlidate_e1_mc_list(bp);
5193 6861 bnx2x_set_eth_mac(bp, 0);
5194 for (i = 0; i < config->hdr.length; i++)
5195 CAM_INVALIDATE(config->config_table[i]);
5196
5197 config->hdr.length = i;
5198 if (CHIP_REV_IS_SLOW(bp))
5199 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5200 else
5201 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5202 config->hdr.client_id = bp->fp->cl_id;
5203 config->hdr.reserved1 = 0;
5204
5205 bp->set_mac_pending++;
5206 smp_wmb();
5207
5208 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5209 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5210 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5211 6862
5212 } else { /* E1H */ 6863 } else {
5213 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 6864 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5214 6865
5215 bnx2x_set_eth_mac_addr_e1h(bp, 0); 6866 bnx2x_set_eth_mac(bp, 0);
5216 6867
5217 for (i = 0; i < MC_HASH_SIZE; i++) 6868 for (i = 0; i < MC_HASH_SIZE; i++)
5218 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 6869 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5219
5220 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5221 } 6870 }
6871
5222#ifdef BCM_CNIC 6872#ifdef BCM_CNIC
5223 /* Clear iSCSI L2 MAC */ 6873 /* Clear iSCSI L2 MAC */
5224 mutex_lock(&bp->cnic_mutex); 6874 mutex_lock(&bp->cnic_mutex);
@@ -5257,33 +6907,44 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5257 6907
5258 /* Close multi and leading connections 6908 /* Close multi and leading connections
5259 Completions for ramrods are collected in a synchronous way */ 6909 Completions for ramrods are collected in a synchronous way */
5260 for_each_nondefault_queue(bp, i) 6910 for_each_queue(bp, i)
5261 if (bnx2x_stop_multi(bp, i)) 6911
6912 if (bnx2x_stop_client(bp, i))
6913#ifdef BNX2X_STOP_ON_ERROR
6914 return;
6915#else
5262 goto unload_error; 6916 goto unload_error;
6917#endif
5263 6918
5264 rc = bnx2x_stop_leading(bp); 6919 rc = bnx2x_func_stop(bp);
5265 if (rc) { 6920 if (rc) {
5266 BNX2X_ERR("Stop leading failed!\n"); 6921 BNX2X_ERR("Function stop failed!\n");
5267#ifdef BNX2X_STOP_ON_ERROR 6922#ifdef BNX2X_STOP_ON_ERROR
5268 return -EBUSY; 6923 return;
5269#else 6924#else
5270 goto unload_error; 6925 goto unload_error;
5271#endif 6926#endif
5272 } 6927 }
5273 6928#ifndef BNX2X_STOP_ON_ERROR
5274unload_error: 6929unload_error:
6930#endif
5275 if (!BP_NOMCP(bp)) 6931 if (!BP_NOMCP(bp))
5276 reset_code = bnx2x_fw_command(bp, reset_code, 0); 6932 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5277 else { 6933 else {
5278 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n", 6934 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
5279 load_count[0], load_count[1], load_count[2]); 6935 "%d, %d, %d\n", BP_PATH(bp),
5280 load_count[0]--; 6936 load_count[BP_PATH(bp)][0],
5281 load_count[1 + port]--; 6937 load_count[BP_PATH(bp)][1],
5282 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n", 6938 load_count[BP_PATH(bp)][2]);
5283 load_count[0], load_count[1], load_count[2]); 6939 load_count[BP_PATH(bp)][0]--;
5284 if (load_count[0] == 0) 6940 load_count[BP_PATH(bp)][1 + port]--;
6941 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6942 "%d, %d, %d\n", BP_PATH(bp),
6943 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6944 load_count[BP_PATH(bp)][2]);
6945 if (load_count[BP_PATH(bp)][0] == 0)
5285 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6946 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5286 else if (load_count[1 + port] == 0) 6947 else if (load_count[BP_PATH(bp)][1 + port] == 0)
5287 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6948 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5288 else 6949 else
5289 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6950 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -5293,6 +6954,12 @@ unload_error:
5293 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) 6954 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5294 bnx2x__link_reset(bp); 6955 bnx2x__link_reset(bp);
5295 6956
6957 /* Disable HW interrupts, NAPI */
6958 bnx2x_netif_stop(bp, 1);
6959
6960 /* Release IRQs */
6961 bnx2x_free_irq(bp);
6962
5296 /* Reset the chip */ 6963 /* Reset the chip */
5297 bnx2x_reset_chip(bp, reset_code); 6964 bnx2x_reset_chip(bp, reset_code);
5298 6965
@@ -5324,7 +6991,6 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5324 } 6991 }
5325} 6992}
5326 6993
5327
5328/* Close gates #2, #3 and #4: */ 6994/* Close gates #2, #3 and #4: */
5329static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) 6995static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5330{ 6996{
@@ -5370,15 +7036,13 @@ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5370static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val) 7036static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5371{ 7037{
5372 /* Restore the `magic' bit value... */ 7038 /* Restore the `magic' bit value... */
5373 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
5374 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
5375 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
5376 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb); 7039 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5377 MF_CFG_WR(bp, shared_mf_config.clp_mb, 7040 MF_CFG_WR(bp, shared_mf_config.clp_mb,
5378 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); 7041 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5379} 7042}
5380 7043
5381/* Prepares for MCP reset: takes care of CLP configurations. 7044/**
7045 * Prepares for MCP reset: takes care of CLP configurations.
5382 * 7046 *
5383 * @param bp 7047 * @param bp
5384 * @param magic_val Old value of 'magic' bit. 7048 * @param magic_val Old value of 'magic' bit.
@@ -5776,39 +7440,23 @@ reset_task_exit:
5776 * Init service functions 7440 * Init service functions
5777 */ 7441 */
5778 7442
5779static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func) 7443u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
5780{ 7444{
5781 switch (func) { 7445 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
5782 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0; 7446 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
5783 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1; 7447 return base + (BP_ABS_FUNC(bp)) * stride;
5784 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5785 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5786 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5787 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5788 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5789 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5790 default:
5791 BNX2X_ERR("Unsupported function index: %d\n", func);
5792 return (u32)(-1);
5793 }
5794} 7448}
5795 7449
5796static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func) 7450static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
5797{ 7451{
5798 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val; 7452 u32 reg = bnx2x_get_pretend_reg(bp);
5799 7453
5800 /* Flush all outstanding writes */ 7454 /* Flush all outstanding writes */
5801 mmiowb(); 7455 mmiowb();
5802 7456
5803 /* Pretend to be function 0 */ 7457 /* Pretend to be function 0 */
5804 REG_WR(bp, reg, 0); 7458 REG_WR(bp, reg, 0);
5805 /* Flush the GRC transaction (in the chip) */ 7459 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
5806 new_val = REG_RD(bp, reg);
5807 if (new_val != 0) {
5808 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5809 new_val);
5810 BUG();
5811 }
5812 7460
5813 /* From now we are in the "like-E1" mode */ 7461 /* From now we are in the "like-E1" mode */
5814 bnx2x_int_disable(bp); 7462 bnx2x_int_disable(bp);
@@ -5816,22 +7464,17 @@ static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5816 /* Flush all outstanding writes */ 7464 /* Flush all outstanding writes */
5817 mmiowb(); 7465 mmiowb();
5818 7466
5819 /* Restore the original funtion settings */ 7467 /* Restore the original function */
5820 REG_WR(bp, reg, orig_func); 7468 REG_WR(bp, reg, BP_ABS_FUNC(bp));
5821 new_val = REG_RD(bp, reg); 7469 REG_RD(bp, reg);
5822 if (new_val != orig_func) {
5823 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5824 orig_func, new_val);
5825 BUG();
5826 }
5827} 7470}
5828 7471
5829static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func) 7472static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
5830{ 7473{
5831 if (CHIP_IS_E1H(bp)) 7474 if (CHIP_IS_E1(bp))
5832 bnx2x_undi_int_disable_e1h(bp, func);
5833 else
5834 bnx2x_int_disable(bp); 7475 bnx2x_int_disable(bp);
7476 else
7477 bnx2x_undi_int_disable_e1h(bp);
5835} 7478}
5836 7479
5837static void __devinit bnx2x_undi_unload(struct bnx2x *bp) 7480static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
@@ -5848,8 +7491,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5848 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 7491 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5849 if (val == 0x7) { 7492 if (val == 0x7) {
5850 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7493 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5851 /* save our func */ 7494 /* save our pf_num */
5852 int func = BP_FUNC(bp); 7495 int orig_pf_num = bp->pf_num;
5853 u32 swap_en; 7496 u32 swap_en;
5854 u32 swap_val; 7497 u32 swap_val;
5855 7498
@@ -5859,9 +7502,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5859 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 7502 BNX2X_DEV_INFO("UNDI is active! reset device\n");
5860 7503
5861 /* try unload UNDI on port 0 */ 7504 /* try unload UNDI on port 0 */
5862 bp->func = 0; 7505 bp->pf_num = 0;
5863 bp->fw_seq = 7506 bp->fw_seq =
5864 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7507 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
5865 DRV_MSG_SEQ_NUMBER_MASK); 7508 DRV_MSG_SEQ_NUMBER_MASK);
5866 reset_code = bnx2x_fw_command(bp, reset_code, 0); 7509 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5867 7510
@@ -5873,9 +7516,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5873 DRV_MSG_CODE_UNLOAD_DONE, 0); 7516 DRV_MSG_CODE_UNLOAD_DONE, 0);
5874 7517
5875 /* unload UNDI on port 1 */ 7518 /* unload UNDI on port 1 */
5876 bp->func = 1; 7519 bp->pf_num = 1;
5877 bp->fw_seq = 7520 bp->fw_seq =
5878 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7521 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
5879 DRV_MSG_SEQ_NUMBER_MASK); 7522 DRV_MSG_SEQ_NUMBER_MASK);
5880 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7523 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5881 7524
@@ -5885,7 +7528,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5885 /* now it's safe to release the lock */ 7528 /* now it's safe to release the lock */
5886 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5887 7530
5888 bnx2x_undi_int_disable(bp, func); 7531 bnx2x_undi_int_disable(bp);
5889 7532
5890 /* close input traffic and wait for it */ 7533 /* close input traffic and wait for it */
5891 /* Do not rcv packets to BRB */ 7534 /* Do not rcv packets to BRB */
@@ -5924,11 +7567,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5924 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 7567 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5925 7568
5926 /* restore our func and fw_seq */ 7569 /* restore our func and fw_seq */
5927 bp->func = func; 7570 bp->pf_num = orig_pf_num;
5928 bp->fw_seq = 7571 bp->fw_seq =
5929 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7572 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
5930 DRV_MSG_SEQ_NUMBER_MASK); 7573 DRV_MSG_SEQ_NUMBER_MASK);
5931
5932 } else 7574 } else
5933 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 7575 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5934 } 7576 }
@@ -5950,6 +7592,40 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5950 val = REG_RD(bp, MISC_REG_BOND_ID); 7592 val = REG_RD(bp, MISC_REG_BOND_ID);
5951 id |= (val & 0xf); 7593 id |= (val & 0xf);
5952 bp->common.chip_id = id; 7594 bp->common.chip_id = id;
7595
7596 /* Set doorbell size */
7597 bp->db_size = (1 << BNX2X_DB_SHIFT);
7598
7599 if (CHIP_IS_E2(bp)) {
7600 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7601 if ((val & 1) == 0)
7602 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7603 else
7604 val = (val >> 1) & 1;
7605 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7606 "2_PORT_MODE");
7607 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7608 CHIP_2_PORT_MODE;
7609
7610 if (CHIP_MODE_IS_4_PORT(bp))
7611 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7612 else
7613 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7614 } else {
7615 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7616 bp->pfid = bp->pf_num; /* 0..7 */
7617 }
7618
7619 /*
7620 * set base FW non-default (fast path) status block id, this value is
7621 * used to initialize the fw_sb_id saved on the fp/queue structure to
7622 * determine the id used by the FW.
7623 */
7624 if (CHIP_IS_E1x(bp))
7625 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7626 else /* E2 */
7627 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7628
5953 bp->link_params.chip_id = bp->common.chip_id; 7629 bp->link_params.chip_id = bp->common.chip_id;
5954 BNX2X_DEV_INFO("chip ID is 0x%x\n", id); 7630 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5955 7631
@@ -5967,15 +7643,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5967 bp->common.flash_size, bp->common.flash_size); 7643 bp->common.flash_size, bp->common.flash_size);
5968 7644
5969 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 7645 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5970 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0); 7646 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7647 MISC_REG_GENERIC_CR_1 :
7648 MISC_REG_GENERIC_CR_0));
5971 bp->link_params.shmem_base = bp->common.shmem_base; 7649 bp->link_params.shmem_base = bp->common.shmem_base;
5972 bp->link_params.shmem2_base = bp->common.shmem2_base; 7650 bp->link_params.shmem2_base = bp->common.shmem2_base;
5973 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 7651 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
5974 bp->common.shmem_base, bp->common.shmem2_base); 7652 bp->common.shmem_base, bp->common.shmem2_base);
5975 7653
5976 if (!bp->common.shmem_base || 7654 if (!bp->common.shmem_base) {
5977 (bp->common.shmem_base < 0xA0000) ||
5978 (bp->common.shmem_base >= 0xC0000)) {
5979 BNX2X_DEV_INFO("MCP not active\n"); 7655 BNX2X_DEV_INFO("MCP not active\n");
5980 bp->flags |= NO_MCP_FLAG; 7656 bp->flags |= NO_MCP_FLAG;
5981 return; 7657 return;
@@ -5984,7 +7660,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5984 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 7660 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5985 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7661 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5986 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7662 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5987 BNX2X_ERROR("BAD MCP validity signature\n"); 7663 BNX2X_ERR("BAD MCP validity signature\n");
5988 7664
5989 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 7665 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5990 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 7666 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -6008,12 +7684,13 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6008 if (val < BNX2X_BC_VER) { 7684 if (val < BNX2X_BC_VER) {
6009 /* for now only warn 7685 /* for now only warn
6010 * later we might need to enforce this */ 7686 * later we might need to enforce this */
6011 BNX2X_ERROR("This driver needs bc_ver %X but found %X, " 7687 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
6012 "please upgrade BC\n", BNX2X_BC_VER, val); 7688 "please upgrade BC\n", BNX2X_BC_VER, val);
6013 } 7689 }
6014 bp->link_params.feature_config_flags |= 7690 bp->link_params.feature_config_flags |=
6015 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ? 7691 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6016 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0; 7692 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7693
6017 bp->link_params.feature_config_flags |= 7694 bp->link_params.feature_config_flags |=
6018 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 7695 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6019 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 7696 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
@@ -6037,6 +7714,57 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6037 val, val2, val3, val4); 7714 val, val2, val3, val4);
6038} 7715}
6039 7716
7717#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7718#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7719
7720static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7721{
7722 int pfid = BP_FUNC(bp);
7723 int vn = BP_E1HVN(bp);
7724 int igu_sb_id;
7725 u32 val;
7726 u8 fid;
7727
7728 bp->igu_base_sb = 0xff;
7729 bp->igu_sb_cnt = 0;
7730 if (CHIP_INT_MODE_IS_BC(bp)) {
7731 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7732 bp->l2_cid_count);
7733
7734 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7735 FP_SB_MAX_E1x;
7736
7737 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7738 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7739
7740 return;
7741 }
7742
7743 /* IGU in normal mode - read CAM */
7744 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7745 igu_sb_id++) {
7746 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7747 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7748 continue;
7749 fid = IGU_FID(val);
7750 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7751 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7752 continue;
7753 if (IGU_VEC(val) == 0)
7754 /* default status block */
7755 bp->igu_dsb_id = igu_sb_id;
7756 else {
7757 if (bp->igu_base_sb == 0xff)
7758 bp->igu_base_sb = igu_sb_id;
7759 bp->igu_sb_cnt++;
7760 }
7761 }
7762 }
7763 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7764 if (bp->igu_sb_cnt == 0)
7765 BNX2X_ERR("CAM configuration error\n");
7766}
7767
6040static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 7768static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6041 u32 switch_cfg) 7769 u32 switch_cfg)
6042{ 7770{
@@ -6079,7 +7807,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6079 SHMEM_RD(bp, 7807 SHMEM_RD(bp,
6080 dev_info.port_hw_config[port].external_phy_config2)); 7808 dev_info.port_hw_config[port].external_phy_config2));
6081 return; 7809 return;
6082 } 7810 }
6083 7811
6084 switch (switch_cfg) { 7812 switch (switch_cfg) {
6085 case SWITCH_CFG_1G: 7813 case SWITCH_CFG_1G:
@@ -6092,7 +7820,6 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6092 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + 7820 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6093 port*0x18); 7821 port*0x18);
6094 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr); 7822 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6095
6096 break; 7823 break;
6097 7824
6098 default: 7825 default:
@@ -6121,7 +7848,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6121 if (!(bp->link_params.speed_cap_mask[idx] & 7848 if (!(bp->link_params.speed_cap_mask[idx] &
6122 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) 7849 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6123 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half | 7850 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6124 SUPPORTED_1000baseT_Full); 7851 SUPPORTED_1000baseT_Full);
6125 7852
6126 if (!(bp->link_params.speed_cap_mask[idx] & 7853 if (!(bp->link_params.speed_cap_mask[idx] &
6127 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 7854 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -6155,41 +7882,41 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6155 bp->link_params.req_duplex[idx] = DUPLEX_FULL; 7882 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6156 link_config = bp->port.link_config[idx]; 7883 link_config = bp->port.link_config[idx];
6157 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { 7884 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6158 case PORT_FEATURE_LINK_SPEED_AUTO: 7885 case PORT_FEATURE_LINK_SPEED_AUTO:
6159 if (bp->port.supported[idx] & SUPPORTED_Autoneg) { 7886 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6160 bp->link_params.req_line_speed[idx] = 7887 bp->link_params.req_line_speed[idx] =
6161 SPEED_AUTO_NEG; 7888 SPEED_AUTO_NEG;
6162 bp->port.advertising[idx] |= 7889 bp->port.advertising[idx] |=
6163 bp->port.supported[idx]; 7890 bp->port.supported[idx];
6164 } else { 7891 } else {
6165 /* force 10G, no AN */ 7892 /* force 10G, no AN */
6166 bp->link_params.req_line_speed[idx] = 7893 bp->link_params.req_line_speed[idx] =
6167 SPEED_10000; 7894 SPEED_10000;
6168 bp->port.advertising[idx] |= 7895 bp->port.advertising[idx] |=
6169 (ADVERTISED_10000baseT_Full | 7896 (ADVERTISED_10000baseT_Full |
6170 ADVERTISED_FIBRE); 7897 ADVERTISED_FIBRE);
6171 continue; 7898 continue;
6172 } 7899 }
6173 break; 7900 break;
6174 7901
6175 case PORT_FEATURE_LINK_SPEED_10M_FULL: 7902 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6176 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) { 7903 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6177 bp->link_params.req_line_speed[idx] = 7904 bp->link_params.req_line_speed[idx] =
6178 SPEED_10; 7905 SPEED_10;
6179 bp->port.advertising[idx] |= 7906 bp->port.advertising[idx] |=
6180 (ADVERTISED_10baseT_Full | 7907 (ADVERTISED_10baseT_Full |
6181 ADVERTISED_TP); 7908 ADVERTISED_TP);
6182 } else { 7909 } else {
6183 BNX2X_ERROR("NVRAM config error. " 7910 BNX2X_ERROR("NVRAM config error. "
6184 "Invalid link_config 0x%x" 7911 "Invalid link_config 0x%x"
6185 " speed_cap_mask 0x%x\n", 7912 " speed_cap_mask 0x%x\n",
6186 link_config, 7913 link_config,
6187 bp->link_params.speed_cap_mask[idx]); 7914 bp->link_params.speed_cap_mask[idx]);
6188 return; 7915 return;
6189 } 7916 }
6190 break; 7917 break;
6191 7918
6192 case PORT_FEATURE_LINK_SPEED_10M_HALF: 7919 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6193 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) { 7920 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6194 bp->link_params.req_line_speed[idx] = 7921 bp->link_params.req_line_speed[idx] =
6195 SPEED_10; 7922 SPEED_10;
@@ -6197,70 +7924,74 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6197 DUPLEX_HALF; 7924 DUPLEX_HALF;
6198 bp->port.advertising[idx] |= 7925 bp->port.advertising[idx] |=
6199 (ADVERTISED_10baseT_Half | 7926 (ADVERTISED_10baseT_Half |
6200 ADVERTISED_TP); 7927 ADVERTISED_TP);
6201 } else { 7928 } else {
6202 BNX2X_ERROR("NVRAM config error. " 7929 BNX2X_ERROR("NVRAM config error. "
6203 "Invalid link_config 0x%x" 7930 "Invalid link_config 0x%x"
6204 " speed_cap_mask 0x%x\n", 7931 " speed_cap_mask 0x%x\n",
6205 link_config, 7932 link_config,
6206 bp->link_params.speed_cap_mask[idx]); 7933 bp->link_params.speed_cap_mask[idx]);
6207 return; 7934 return;
6208 } 7935 }
6209 break; 7936 break;
6210 7937
6211 case PORT_FEATURE_LINK_SPEED_100M_FULL: 7938 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6212 if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) { 7939 if (bp->port.supported[idx] &
7940 SUPPORTED_100baseT_Full) {
6213 bp->link_params.req_line_speed[idx] = 7941 bp->link_params.req_line_speed[idx] =
6214 SPEED_100; 7942 SPEED_100;
6215 bp->port.advertising[idx] |= 7943 bp->port.advertising[idx] |=
6216 (ADVERTISED_100baseT_Full | 7944 (ADVERTISED_100baseT_Full |
6217 ADVERTISED_TP); 7945 ADVERTISED_TP);
6218 } else { 7946 } else {
6219 BNX2X_ERROR("NVRAM config error. " 7947 BNX2X_ERROR("NVRAM config error. "
6220 "Invalid link_config 0x%x" 7948 "Invalid link_config 0x%x"
6221 " speed_cap_mask 0x%x\n", 7949 " speed_cap_mask 0x%x\n",
6222 link_config, 7950 link_config,
6223 bp->link_params.speed_cap_mask[idx]); 7951 bp->link_params.speed_cap_mask[idx]);
6224 return; 7952 return;
6225 } 7953 }
6226 break; 7954 break;
6227 7955
6228 case PORT_FEATURE_LINK_SPEED_100M_HALF: 7956 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6229 if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) { 7957 if (bp->port.supported[idx] &
6230 bp->link_params.req_line_speed[idx] = SPEED_100; 7958 SUPPORTED_100baseT_Half) {
6231 bp->link_params.req_duplex[idx] = DUPLEX_HALF; 7959 bp->link_params.req_line_speed[idx] =
7960 SPEED_100;
7961 bp->link_params.req_duplex[idx] =
7962 DUPLEX_HALF;
6232 bp->port.advertising[idx] |= 7963 bp->port.advertising[idx] |=
6233 (ADVERTISED_100baseT_Half | 7964 (ADVERTISED_100baseT_Half |
6234 ADVERTISED_TP); 7965 ADVERTISED_TP);
6235 } else { 7966 } else {
6236 BNX2X_ERROR("NVRAM config error. " 7967 BNX2X_ERROR("NVRAM config error. "
6237 "Invalid link_config 0x%x" 7968 "Invalid link_config 0x%x"
6238 " speed_cap_mask 0x%x\n", 7969 " speed_cap_mask 0x%x\n",
6239 link_config, 7970 link_config,
6240 bp->link_params.speed_cap_mask[idx]); 7971 bp->link_params.speed_cap_mask[idx]);
6241 return; 7972 return;
6242 } 7973 }
6243 break; 7974 break;
6244 7975
6245 case PORT_FEATURE_LINK_SPEED_1G: 7976 case PORT_FEATURE_LINK_SPEED_1G:
6246 if (bp->port.supported[idx] & 7977 if (bp->port.supported[idx] &
6247 SUPPORTED_1000baseT_Full) { 7978 SUPPORTED_1000baseT_Full) {
6248 bp->link_params.req_line_speed[idx] = 7979 bp->link_params.req_line_speed[idx] =
6249 SPEED_1000; 7980 SPEED_1000;
6250 bp->port.advertising[idx] |= 7981 bp->port.advertising[idx] |=
6251 (ADVERTISED_1000baseT_Full | 7982 (ADVERTISED_1000baseT_Full |
6252 ADVERTISED_TP); 7983 ADVERTISED_TP);
6253 } else { 7984 } else {
6254 BNX2X_ERROR("NVRAM config error. " 7985 BNX2X_ERROR("NVRAM config error. "
6255 "Invalid link_config 0x%x" 7986 "Invalid link_config 0x%x"
6256 " speed_cap_mask 0x%x\n", 7987 " speed_cap_mask 0x%x\n",
6257 link_config, 7988 link_config,
6258 bp->link_params.speed_cap_mask[idx]); 7989 bp->link_params.speed_cap_mask[idx]);
6259 return; 7990 return;
6260 } 7991 }
6261 break; 7992 break;
6262 7993
6263 case PORT_FEATURE_LINK_SPEED_2_5G: 7994 case PORT_FEATURE_LINK_SPEED_2_5G:
6264 if (bp->port.supported[idx] & 7995 if (bp->port.supported[idx] &
6265 SUPPORTED_2500baseX_Full) { 7996 SUPPORTED_2500baseX_Full) {
6266 bp->link_params.req_line_speed[idx] = 7997 bp->link_params.req_line_speed[idx] =
@@ -6268,19 +7999,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6268 bp->port.advertising[idx] |= 7999 bp->port.advertising[idx] |=
6269 (ADVERTISED_2500baseX_Full | 8000 (ADVERTISED_2500baseX_Full |
6270 ADVERTISED_TP); 8001 ADVERTISED_TP);
6271 } else { 8002 } else {
6272 BNX2X_ERROR("NVRAM config error. " 8003 BNX2X_ERROR("NVRAM config error. "
6273 "Invalid link_config 0x%x" 8004 "Invalid link_config 0x%x"
6274 " speed_cap_mask 0x%x\n", 8005 " speed_cap_mask 0x%x\n",
6275 link_config, 8006 link_config,
6276 bp->link_params.speed_cap_mask[idx]); 8007 bp->link_params.speed_cap_mask[idx]);
6277 return; 8008 return;
6278 } 8009 }
6279 break; 8010 break;
6280 8011
6281 case PORT_FEATURE_LINK_SPEED_10G_CX4: 8012 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6282 case PORT_FEATURE_LINK_SPEED_10G_KX4: 8013 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6283 case PORT_FEATURE_LINK_SPEED_10G_KR: 8014 case PORT_FEATURE_LINK_SPEED_10G_KR:
6284 if (bp->port.supported[idx] & 8015 if (bp->port.supported[idx] &
6285 SUPPORTED_10000baseT_Full) { 8016 SUPPORTED_10000baseT_Full) {
6286 bp->link_params.req_line_speed[idx] = 8017 bp->link_params.req_line_speed[idx] =
@@ -6288,24 +8019,26 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6288 bp->port.advertising[idx] |= 8019 bp->port.advertising[idx] |=
6289 (ADVERTISED_10000baseT_Full | 8020 (ADVERTISED_10000baseT_Full |
6290 ADVERTISED_FIBRE); 8021 ADVERTISED_FIBRE);
6291 } else { 8022 } else {
6292 BNX2X_ERROR("NVRAM config error. " 8023 BNX2X_ERROR("NVRAM config error. "
6293 "Invalid link_config 0x%x" 8024 "Invalid link_config 0x%x"
6294 " speed_cap_mask 0x%x\n", 8025 " speed_cap_mask 0x%x\n",
6295 link_config, 8026 link_config,
6296 bp->link_params.speed_cap_mask[idx]); 8027 bp->link_params.speed_cap_mask[idx]);
6297 return; 8028 return;
6298 } 8029 }
6299 break; 8030 break;
6300 8031
6301 default: 8032 default:
6302 BNX2X_ERROR("NVRAM config error. " 8033 BNX2X_ERROR("NVRAM config error. "
6303 "BAD link speed link_config 0x%x\n", 8034 "BAD link speed link_config 0x%x\n",
6304 link_config); 8035 link_config);
6305 bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; 8036 bp->link_params.req_line_speed[idx] =
6306 bp->port.advertising[idx] = bp->port.supported[idx]; 8037 SPEED_AUTO_NEG;
6307 break; 8038 bp->port.advertising[idx] =
6308 } 8039 bp->port.supported[idx];
8040 break;
8041 }
6309 8042
6310 bp->link_params.req_flow_ctrl[idx] = (link_config & 8043 bp->link_params.req_flow_ctrl[idx] = (link_config &
6311 PORT_FEATURE_FLOW_CONTROL_MASK); 8044 PORT_FEATURE_FLOW_CONTROL_MASK);
@@ -6367,14 +8100,14 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6367 bp->wol = (!(bp->flags & NO_WOL_FLAG) && 8100 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6368 (config & PORT_FEATURE_WOL_ENABLED)); 8101 (config & PORT_FEATURE_WOL_ENABLED));
6369 8102
6370 BNX2X_DEV_INFO("lane_config 0x%08x" 8103 BNX2X_DEV_INFO("lane_config 0x%08x "
6371 "speed_cap_mask0 0x%08x link_config0 0x%08x\n", 8104 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
6372 bp->link_params.lane_config, 8105 bp->link_params.lane_config,
6373 bp->link_params.speed_cap_mask[0], 8106 bp->link_params.speed_cap_mask[0],
6374 bp->port.link_config[0]); 8107 bp->port.link_config[0]);
6375 8108
6376 bp->link_params.switch_cfg = (bp->port.link_config[0] & 8109 bp->link_params.switch_cfg = (bp->port.link_config[0] &
6377 PORT_FEATURE_CONNECTED_SWITCH_MASK); 8110 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6378 bnx2x_phy_probe(&bp->link_params); 8111 bnx2x_phy_probe(&bp->link_params);
6379 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg); 8112 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6380 8113
@@ -6411,41 +8144,74 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6411 8144
6412static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8145static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6413{ 8146{
6414 int func = BP_FUNC(bp); 8147 int func = BP_ABS_FUNC(bp);
8148 int vn;
6415 u32 val, val2; 8149 u32 val, val2;
6416 int rc = 0; 8150 int rc = 0;
6417 8151
6418 bnx2x_get_common_hwinfo(bp); 8152 bnx2x_get_common_hwinfo(bp);
6419 8153
6420 bp->e1hov = 0; 8154 if (CHIP_IS_E1x(bp)) {
6421 bp->e1hmf = 0; 8155 bp->common.int_block = INT_BLOCK_HC;
6422 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) { 8156
6423 bp->mf_config = 8157 bp->igu_dsb_id = DEF_SB_IGU_ID;
6424 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 8158 bp->igu_base_sb = 0;
8159 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8160 } else {
8161 bp->common.int_block = INT_BLOCK_IGU;
8162 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8163 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8164 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8165 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8166 } else
8167 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8168
8169 bnx2x_get_igu_cam_info(bp);
8170
8171 }
8172 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8173 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8174
8175 /*
8176 * Initialize MF configuration
8177 */
6425 8178
6426 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) & 8179 bp->mf_ov = 0;
8180 bp->mf_mode = 0;
8181 vn = BP_E1HVN(bp);
8182 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8183 if (SHMEM2_HAS(bp, mf_cfg_addr))
8184 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8185 else
8186 bp->common.mf_cfg_base = bp->common.shmem_base +
8187 offsetof(struct shmem_region, func_mb) +
8188 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8189 bp->mf_config[vn] =
8190 MF_CFG_RD(bp, func_mf_config[func].config);
8191
8192 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
6427 FUNC_MF_CFG_E1HOV_TAG_MASK); 8193 FUNC_MF_CFG_E1HOV_TAG_MASK);
6428 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) 8194 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6429 bp->e1hmf = 1; 8195 bp->mf_mode = 1;
6430 BNX2X_DEV_INFO("%s function mode\n", 8196 BNX2X_DEV_INFO("%s function mode\n",
6431 IS_E1HMF(bp) ? "multi" : "single"); 8197 IS_MF(bp) ? "multi" : "single");
6432 8198
6433 if (IS_E1HMF(bp)) { 8199 if (IS_MF(bp)) {
6434 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func]. 8200 val = (MF_CFG_RD(bp, func_mf_config[func].
6435 e1hov_tag) & 8201 e1hov_tag) &
6436 FUNC_MF_CFG_E1HOV_TAG_MASK); 8202 FUNC_MF_CFG_E1HOV_TAG_MASK);
6437 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8203 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6438 bp->e1hov = val; 8204 bp->mf_ov = val;
6439 BNX2X_DEV_INFO("E1HOV for func %d is %d " 8205 BNX2X_DEV_INFO("MF OV for func %d is %d "
6440 "(0x%04x)\n", 8206 "(0x%04x)\n",
6441 func, bp->e1hov, bp->e1hov); 8207 func, bp->mf_ov, bp->mf_ov);
6442 } else { 8208 } else {
6443 BNX2X_ERROR("No valid E1HOV for func %d," 8209 BNX2X_ERROR("No valid MF OV for func %d,"
6444 " aborting\n", func); 8210 " aborting\n", func);
6445 rc = -EPERM; 8211 rc = -EPERM;
6446 } 8212 }
6447 } else { 8213 } else {
6448 if (BP_E1HVN(bp)) { 8214 if (BP_VN(bp)) {
6449 BNX2X_ERROR("VN %d in single function mode," 8215 BNX2X_ERROR("VN %d in single function mode,"
6450 " aborting\n", BP_E1HVN(bp)); 8216 " aborting\n", BP_E1HVN(bp));
6451 rc = -EPERM; 8217 rc = -EPERM;
@@ -6453,17 +8219,31 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6453 } 8219 }
6454 } 8220 }
6455 8221
8222 /* adjust igu_sb_cnt to MF for E1x */
8223 if (CHIP_IS_E1x(bp) && IS_MF(bp))
8224 bp->igu_sb_cnt /= E1HVN_MAX;
8225
8226 /*
8227 * adjust E2 sb count: to be removed when FW will support
8228 * more then 16 L2 clients
8229 */
8230#define MAX_L2_CLIENTS 16
8231 if (CHIP_IS_E2(bp))
8232 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8233 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8234
6456 if (!BP_NOMCP(bp)) { 8235 if (!BP_NOMCP(bp)) {
6457 bnx2x_get_port_hwinfo(bp); 8236 bnx2x_get_port_hwinfo(bp);
6458 8237
6459 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) & 8238 bp->fw_seq =
6460 DRV_MSG_SEQ_NUMBER_MASK); 8239 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8240 DRV_MSG_SEQ_NUMBER_MASK);
6461 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8241 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6462 } 8242 }
6463 8243
6464 if (IS_E1HMF(bp)) { 8244 if (IS_MF(bp)) {
6465 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper); 8245 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
6466 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower); 8246 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
6467 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) && 8247 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6468 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) { 8248 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6469 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); 8249 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
@@ -6557,7 +8337,7 @@ out_not_found:
6557 8337
6558static int __devinit bnx2x_init_bp(struct bnx2x *bp) 8338static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6559{ 8339{
6560 int func = BP_FUNC(bp); 8340 int func;
6561 int timer_interval; 8341 int timer_interval;
6562 int rc; 8342 int rc;
6563 8343
@@ -6577,7 +8357,13 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6577 8357
6578 rc = bnx2x_get_hwinfo(bp); 8358 rc = bnx2x_get_hwinfo(bp);
6579 8359
8360 if (!rc)
8361 rc = bnx2x_alloc_mem_bp(bp);
8362
6580 bnx2x_read_fwinfo(bp); 8363 bnx2x_read_fwinfo(bp);
8364
8365 func = BP_FUNC(bp);
8366
6581 /* need to reset chip if undi was active */ 8367 /* need to reset chip if undi was active */
6582 if (!BP_NOMCP(bp)) 8368 if (!BP_NOMCP(bp))
6583 bnx2x_undi_unload(bp); 8369 bnx2x_undi_unload(bp);
@@ -6623,8 +8409,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6623 bp->rx_csum = 1; 8409 bp->rx_csum = 1;
6624 8410
6625 /* make sure that the numbers are in the right granularity */ 8411 /* make sure that the numbers are in the right granularity */
6626 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); 8412 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
6627 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR); 8413 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
6628 8414
6629 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 8415 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6630 bp->current_interval = (poll ? poll : timer_interval); 8416 bp->current_interval = (poll ? poll : timer_interval);
@@ -6716,81 +8502,22 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6716 8502
6717 if (dev->flags & IFF_PROMISC) 8503 if (dev->flags & IFF_PROMISC)
6718 rx_mode = BNX2X_RX_MODE_PROMISC; 8504 rx_mode = BNX2X_RX_MODE_PROMISC;
6719
6720 else if ((dev->flags & IFF_ALLMULTI) || 8505 else if ((dev->flags & IFF_ALLMULTI) ||
6721 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && 8506 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6722 CHIP_IS_E1(bp))) 8507 CHIP_IS_E1(bp)))
6723 rx_mode = BNX2X_RX_MODE_ALLMULTI; 8508 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6724
6725 else { /* some multicasts */ 8509 else { /* some multicasts */
6726 if (CHIP_IS_E1(bp)) { 8510 if (CHIP_IS_E1(bp)) {
6727 int i, old, offset; 8511 /*
6728 struct netdev_hw_addr *ha; 8512 * set mc list, do not wait as wait implies sleep
6729 struct mac_configuration_cmd *config = 8513 * and set_rx_mode can be invoked from non-sleepable
6730 bnx2x_sp(bp, mcast_config); 8514 * context
6731 8515 */
6732 i = 0; 8516 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
6733 netdev_for_each_mc_addr(ha, dev) { 8517 BNX2X_MAX_EMUL_MULTI*(1 + port) :
6734 config->config_table[i]. 8518 BNX2X_MAX_MULTICAST*(1 + port));
6735 cam_entry.msb_mac_addr =
6736 swab16(*(u16 *)&ha->addr[0]);
6737 config->config_table[i].
6738 cam_entry.middle_mac_addr =
6739 swab16(*(u16 *)&ha->addr[2]);
6740 config->config_table[i].
6741 cam_entry.lsb_mac_addr =
6742 swab16(*(u16 *)&ha->addr[4]);
6743 config->config_table[i].cam_entry.flags =
6744 cpu_to_le16(port);
6745 config->config_table[i].
6746 target_table_entry.flags = 0;
6747 config->config_table[i].target_table_entry.
6748 clients_bit_vector =
6749 cpu_to_le32(1 << BP_L_ID(bp));
6750 config->config_table[i].
6751 target_table_entry.vlan_id = 0;
6752
6753 DP(NETIF_MSG_IFUP,
6754 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6755 config->config_table[i].
6756 cam_entry.msb_mac_addr,
6757 config->config_table[i].
6758 cam_entry.middle_mac_addr,
6759 config->config_table[i].
6760 cam_entry.lsb_mac_addr);
6761 i++;
6762 }
6763 old = config->hdr.length;
6764 if (old > i) {
6765 for (; i < old; i++) {
6766 if (CAM_IS_INVALID(config->
6767 config_table[i])) {
6768 /* already invalidated */
6769 break;
6770 }
6771 /* invalidate */
6772 CAM_INVALIDATE(config->
6773 config_table[i]);
6774 }
6775 }
6776
6777 if (CHIP_REV_IS_SLOW(bp))
6778 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6779 else
6780 offset = BNX2X_MAX_MULTICAST*(1 + port);
6781
6782 config->hdr.length = i;
6783 config->hdr.offset = offset;
6784 config->hdr.client_id = bp->fp->cl_id;
6785 config->hdr.reserved1 = 0;
6786
6787 bp->set_mac_pending++;
6788 smp_wmb();
6789 8519
6790 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 8520 bnx2x_set_e1_mc_list(bp, offset);
6791 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6792 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6793 0);
6794 } else { /* E1H */ 8521 } else { /* E1H */
6795 /* Accept one or more multicasts */ 8522 /* Accept one or more multicasts */
6796 struct netdev_hw_addr *ha; 8523 struct netdev_hw_addr *ha;
@@ -6802,9 +8529,10 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6802 8529
6803 netdev_for_each_mc_addr(ha, dev) { 8530 netdev_for_each_mc_addr(ha, dev) {
6804 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 8531 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6805 ha->addr); 8532 bnx2x_mc_addr(ha));
6806 8533
6807 crc = crc32c_le(0, ha->addr, ETH_ALEN); 8534 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8535 ETH_ALEN);
6808 bit = (crc >> 24) & 0xff; 8536 bit = (crc >> 24) & 0xff;
6809 regidx = bit >> 5; 8537 regidx = bit >> 5;
6810 bit &= 0x1f; 8538 bit &= 0x1f;
@@ -6821,7 +8549,6 @@ void bnx2x_set_rx_mode(struct net_device *dev)
6821 bnx2x_set_storm_rx_mode(bp); 8549 bnx2x_set_storm_rx_mode(bp);
6822} 8550}
6823 8551
6824
6825/* called with rtnl_lock */ 8552/* called with rtnl_lock */
6826static int bnx2x_mdio_read(struct net_device *netdev, int prtad, 8553static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6827 int devad, u16 addr) 8554 int devad, u16 addr)
@@ -6901,9 +8628,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
6901 .ndo_do_ioctl = bnx2x_ioctl, 8628 .ndo_do_ioctl = bnx2x_ioctl,
6902 .ndo_change_mtu = bnx2x_change_mtu, 8629 .ndo_change_mtu = bnx2x_change_mtu,
6903 .ndo_tx_timeout = bnx2x_tx_timeout, 8630 .ndo_tx_timeout = bnx2x_tx_timeout,
6904#ifdef BCM_VLAN
6905 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
6906#endif
6907#ifdef CONFIG_NET_POLL_CONTROLLER 8631#ifdef CONFIG_NET_POLL_CONTROLLER
6908 .ndo_poll_controller = poll_bnx2x, 8632 .ndo_poll_controller = poll_bnx2x,
6909#endif 8633#endif
@@ -6921,7 +8645,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6921 bp->dev = dev; 8645 bp->dev = dev;
6922 bp->pdev = pdev; 8646 bp->pdev = pdev;
6923 bp->flags = 0; 8647 bp->flags = 0;
6924 bp->func = PCI_FUNC(pdev->devfn); 8648 bp->pf_num = PCI_FUNC(pdev->devfn);
6925 8649
6926 rc = pci_enable_device(pdev); 8650 rc = pci_enable_device(pdev);
6927 if (rc) { 8651 if (rc) {
@@ -7003,7 +8727,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7003 } 8727 }
7004 8728
7005 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), 8729 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7006 min_t(u64, BNX2X_DB_SIZE, 8730 min_t(u64, BNX2X_DB_SIZE(bp),
7007 pci_resource_len(pdev, 2))); 8731 pci_resource_len(pdev, 2)));
7008 if (!bp->doorbells) { 8732 if (!bp->doorbells) {
7009 dev_err(&bp->pdev->dev, 8733 dev_err(&bp->pdev->dev,
@@ -7035,9 +8759,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7035 dev->features |= NETIF_F_HIGHDMA; 8759 dev->features |= NETIF_F_HIGHDMA;
7036 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 8760 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7037 dev->features |= NETIF_F_TSO6; 8761 dev->features |= NETIF_F_TSO6;
7038#ifdef BCM_VLAN
7039 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 8762 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7040 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7041 8763
7042 dev->vlan_features |= NETIF_F_SG; 8764 dev->vlan_features |= NETIF_F_SG;
7043 dev->vlan_features |= NETIF_F_HW_CSUM; 8765 dev->vlan_features |= NETIF_F_HW_CSUM;
@@ -7045,7 +8767,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7045 dev->vlan_features |= NETIF_F_HIGHDMA; 8767 dev->vlan_features |= NETIF_F_HIGHDMA;
7046 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 8768 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7047 dev->vlan_features |= NETIF_F_TSO6; 8769 dev->vlan_features |= NETIF_F_TSO6;
7048#endif
7049 8770
7050 /* get_port_hwinfo() will set prtad and mmds properly */ 8771 /* get_port_hwinfo() will set prtad and mmds properly */
7051 bp->mdio.prtad = MDIO_PRTAD_NONE; 8772 bp->mdio.prtad = MDIO_PRTAD_NONE;
@@ -7179,6 +8900,30 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7179 } 8900 }
7180} 8901}
7181 8902
8903/**
8904 * IRO array is stored in the following format:
8905 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8906 */
8907static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8908{
8909 const __be32 *source = (const __be32 *)_source;
8910 struct iro *target = (struct iro *)_target;
8911 u32 i, j, tmp;
8912
8913 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8914 target[i].base = be32_to_cpu(source[j]);
8915 j++;
8916 tmp = be32_to_cpu(source[j]);
8917 target[i].m1 = (tmp >> 16) & 0xffff;
8918 target[i].m2 = tmp & 0xffff;
8919 j++;
8920 tmp = be32_to_cpu(source[j]);
8921 target[i].m3 = (tmp >> 16) & 0xffff;
8922 target[i].size = tmp & 0xffff;
8923 j++;
8924 }
8925}
8926
7182static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 8927static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7183{ 8928{
7184 const __be16 *source = (const __be16 *)_source; 8929 const __be16 *source = (const __be16 *)_source;
@@ -7211,6 +8956,8 @@ int bnx2x_init_firmware(struct bnx2x *bp)
7211 fw_file_name = FW_FILE_NAME_E1; 8956 fw_file_name = FW_FILE_NAME_E1;
7212 else if (CHIP_IS_E1H(bp)) 8957 else if (CHIP_IS_E1H(bp))
7213 fw_file_name = FW_FILE_NAME_E1H; 8958 fw_file_name = FW_FILE_NAME_E1H;
8959 else if (CHIP_IS_E2(bp))
8960 fw_file_name = FW_FILE_NAME_E2;
7214 else { 8961 else {
7215 BNX2X_ERR("Unsupported chip revision\n"); 8962 BNX2X_ERR("Unsupported chip revision\n");
7216 return -EINVAL; 8963 return -EINVAL;
@@ -7260,9 +9007,13 @@ int bnx2x_init_firmware(struct bnx2x *bp)
7260 be32_to_cpu(fw_hdr->csem_int_table_data.offset); 9007 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7261 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data + 9008 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
7262 be32_to_cpu(fw_hdr->csem_pram_data.offset); 9009 be32_to_cpu(fw_hdr->csem_pram_data.offset);
9010 /* IRO */
9011 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
7263 9012
7264 return 0; 9013 return 0;
7265 9014
9015iro_alloc_err:
9016 kfree(bp->init_ops_offsets);
7266init_offsets_alloc_err: 9017init_offsets_alloc_err:
7267 kfree(bp->init_ops); 9018 kfree(bp->init_ops);
7268init_ops_alloc_err: 9019init_ops_alloc_err:
@@ -7273,6 +9024,15 @@ request_firmware_exit:
7273 return rc; 9024 return rc;
7274} 9025}
7275 9026
9027static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9028{
9029 int cid_count = L2_FP_COUNT(l2_cid_count);
9030
9031#ifdef BCM_CNIC
9032 cid_count += CNIC_CID_MAX;
9033#endif
9034 return roundup(cid_count, QM_CID_ROUND);
9035}
7276 9036
7277static int __devinit bnx2x_init_one(struct pci_dev *pdev, 9037static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7278 const struct pci_device_id *ent) 9038 const struct pci_device_id *ent)
@@ -7280,10 +9040,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7280 struct net_device *dev = NULL; 9040 struct net_device *dev = NULL;
7281 struct bnx2x *bp; 9041 struct bnx2x *bp;
7282 int pcie_width, pcie_speed; 9042 int pcie_width, pcie_speed;
7283 int rc; 9043 int rc, cid_count;
9044
9045 switch (ent->driver_data) {
9046 case BCM57710:
9047 case BCM57711:
9048 case BCM57711E:
9049 cid_count = FP_SB_MAX_E1x;
9050 break;
9051
9052 case BCM57712:
9053 case BCM57712E:
9054 cid_count = FP_SB_MAX_E2;
9055 break;
9056
9057 default:
9058 pr_err("Unknown board_type (%ld), aborting\n",
9059 ent->driver_data);
9060 return ENODEV;
9061 }
9062
9063 cid_count += CNIC_CONTEXT_USE;
7284 9064
7285 /* dev zeroed in init_etherdev */ 9065 /* dev zeroed in init_etherdev */
7286 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 9066 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
7287 if (!dev) { 9067 if (!dev) {
7288 dev_err(&pdev->dev, "Cannot allocate net device\n"); 9068 dev_err(&pdev->dev, "Cannot allocate net device\n");
7289 return -ENOMEM; 9069 return -ENOMEM;
@@ -7294,6 +9074,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7294 9074
7295 pci_set_drvdata(pdev, dev); 9075 pci_set_drvdata(pdev, dev);
7296 9076
9077 bp->l2_cid_count = cid_count;
9078
7297 rc = bnx2x_init_dev(pdev, dev); 9079 rc = bnx2x_init_dev(pdev, dev);
7298 if (rc < 0) { 9080 if (rc < 0) {
7299 free_netdev(dev); 9081 free_netdev(dev);
@@ -7304,17 +9086,32 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7304 if (rc) 9086 if (rc)
7305 goto init_one_exit; 9087 goto init_one_exit;
7306 9088
9089 /* calc qm_cid_count */
9090 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9091
7307 rc = register_netdev(dev); 9092 rc = register_netdev(dev);
7308 if (rc) { 9093 if (rc) {
7309 dev_err(&pdev->dev, "Cannot register net device\n"); 9094 dev_err(&pdev->dev, "Cannot register net device\n");
7310 goto init_one_exit; 9095 goto init_one_exit;
7311 } 9096 }
7312 9097
9098 /* Configure interupt mode: try to enable MSI-X/MSI if
9099 * needed, set bp->num_queues appropriately.
9100 */
9101 bnx2x_set_int_mode(bp);
9102
9103 /* Add all NAPI objects */
9104 bnx2x_add_all_napi(bp);
9105
7313 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 9106 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9107
7314 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9108 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7315 " IRQ %d, ", board_info[ent->driver_data].name, 9109 " IRQ %d, ", board_info[ent->driver_data].name,
7316 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 9110 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7317 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 9111 pcie_width,
9112 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9113 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9114 "5GHz (Gen2)" : "2.5GHz",
7318 dev->base_addr, bp->pdev->irq); 9115 dev->base_addr, bp->pdev->irq);
7319 pr_cont("node addr %pM\n", dev->dev_addr); 9116 pr_cont("node addr %pM\n", dev->dev_addr);
7320 9117
@@ -7351,6 +9148,12 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7351 9148
7352 unregister_netdev(dev); 9149 unregister_netdev(dev);
7353 9150
9151 /* Delete all NAPI objects */
9152 bnx2x_del_all_napi(bp);
9153
9154 /* Disable MSI/MSI-X */
9155 bnx2x_disable_msi(bp);
9156
7354 /* Make sure RESET task is not scheduled before continuing */ 9157 /* Make sure RESET task is not scheduled before continuing */
7355 cancel_delayed_work_sync(&bp->reset_task); 9158 cancel_delayed_work_sync(&bp->reset_task);
7356 9159
@@ -7360,6 +9163,8 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7360 if (bp->doorbells) 9163 if (bp->doorbells)
7361 iounmap(bp->doorbells); 9164 iounmap(bp->doorbells);
7362 9165
9166 bnx2x_free_mem_bp(bp);
9167
7363 free_netdev(dev); 9168 free_netdev(dev);
7364 9169
7365 if (atomic_read(&pdev->enable_cnt) == 1) 9170 if (atomic_read(&pdev->enable_cnt) == 1)
@@ -7385,22 +9190,14 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7385 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n"); 9190 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7386 9191
7387 /* Release IRQs */ 9192 /* Release IRQs */
7388 bnx2x_free_irq(bp, false); 9193 bnx2x_free_irq(bp);
7389
7390 if (CHIP_IS_E1(bp)) {
7391 struct mac_configuration_cmd *config =
7392 bnx2x_sp(bp, mcast_config);
7393
7394 for (i = 0; i < config->hdr.length; i++)
7395 CAM_INVALIDATE(config->config_table[i]);
7396 }
7397 9194
7398 /* Free SKBs, SGEs, TPA pool and driver internals */ 9195 /* Free SKBs, SGEs, TPA pool and driver internals */
7399 bnx2x_free_skbs(bp); 9196 bnx2x_free_skbs(bp);
9197
7400 for_each_queue(bp, i) 9198 for_each_queue(bp, i)
7401 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 9199 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7402 for_each_queue(bp, i) 9200
7403 netif_napi_del(&bnx2x_fp(bp, i, napi));
7404 bnx2x_free_mem(bp); 9201 bnx2x_free_mem(bp);
7405 9202
7406 bp->state = BNX2X_STATE_CLOSED; 9203 bp->state = BNX2X_STATE_CLOSED;
@@ -7432,8 +9229,9 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
7432 BNX2X_ERR("BAD MCP validity signature\n"); 9229 BNX2X_ERR("BAD MCP validity signature\n");
7433 9230
7434 if (!BP_NOMCP(bp)) { 9231 if (!BP_NOMCP(bp)) {
7435 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header) 9232 bp->fw_seq =
7436 & DRV_MSG_SEQ_NUMBER_MASK); 9233 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9234 DRV_MSG_SEQ_NUMBER_MASK);
7437 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 9235 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7438 } 9236 }
7439} 9237}
@@ -7516,7 +9314,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
7516 struct bnx2x *bp = netdev_priv(dev); 9314 struct bnx2x *bp = netdev_priv(dev);
7517 9315
7518 if (bp->recovery_state != BNX2X_RECOVERY_DONE) { 9316 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7519 printk(KERN_ERR "Handling parity error recovery. Try again later\n"); 9317 printk(KERN_ERR "Handling parity error recovery. "
9318 "Try again later\n");
7520 return; 9319 return;
7521 } 9320 }
7522 9321
@@ -7591,19 +9390,53 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7591#endif 9390#endif
7592 9391
7593 spin_lock_bh(&bp->spq_lock); 9392 spin_lock_bh(&bp->spq_lock);
9393 BUG_ON(bp->cnic_spq_pending < count);
7594 bp->cnic_spq_pending -= count; 9394 bp->cnic_spq_pending -= count;
7595 9395
7596 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7597 bp->cnic_spq_pending++) {
7598 9396
7599 if (!bp->cnic_kwq_pending) 9397 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
9398 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
9399 & SPE_HDR_CONN_TYPE) >>
9400 SPE_HDR_CONN_TYPE_SHIFT;
9401
9402 /* Set validation for iSCSI L2 client before sending SETUP
9403 * ramrod
9404 */
9405 if (type == ETH_CONNECTION_TYPE) {
9406 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
9407 hdr.conn_and_cmd_data) >>
9408 SPE_HDR_CMD_ID_SHIFT) & 0xff;
9409
9410 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
9411 bnx2x_set_ctx_validation(&bp->context.
9412 vcxt[BNX2X_ISCSI_ETH_CID].eth,
9413 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
9414 }
9415
9416 /* There may be not more than 8 L2 and COMMON SPEs and not more
9417 * than 8 L5 SPEs in the air.
9418 */
9419 if ((type == NONE_CONNECTION_TYPE) ||
9420 (type == ETH_CONNECTION_TYPE)) {
9421 if (!atomic_read(&bp->spq_left))
9422 break;
9423 else
9424 atomic_dec(&bp->spq_left);
9425 } else if (type == ISCSI_CONNECTION_TYPE) {
9426 if (bp->cnic_spq_pending >=
9427 bp->cnic_eth_dev.max_kwqe_pending)
9428 break;
9429 else
9430 bp->cnic_spq_pending++;
9431 } else {
9432 BNX2X_ERR("Unknown SPE type: %d\n", type);
9433 bnx2x_panic();
7600 break; 9434 break;
9435 }
7601 9436
7602 spe = bnx2x_sp_get_next(bp); 9437 spe = bnx2x_sp_get_next(bp);
7603 *spe = *bp->cnic_kwq_cons; 9438 *spe = *bp->cnic_kwq_cons;
7604 9439
7605 bp->cnic_kwq_pending--;
7606
7607 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n", 9440 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7608 bp->cnic_spq_pending, bp->cnic_kwq_pending, count); 9441 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7609 9442
@@ -7641,8 +9474,8 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
7641 9474
7642 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n", 9475 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7643 spe->hdr.conn_and_cmd_data, spe->hdr.type, 9476 spe->hdr.conn_and_cmd_data, spe->hdr.type,
7644 spe->data.mac_config_addr.hi, 9477 spe->data.update_data_addr.hi,
7645 spe->data.mac_config_addr.lo, 9478 spe->data.update_data_addr.lo,
7646 bp->cnic_kwq_pending); 9479 bp->cnic_kwq_pending);
7647 9480
7648 if (bp->cnic_kwq_prod == bp->cnic_kwq_last) 9481 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
@@ -7708,7 +9541,7 @@ static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7708 ctl.data.comp.cid = cid; 9541 ctl.data.comp.cid = cid;
7709 9542
7710 bnx2x_cnic_ctl_send_bh(bp, &ctl); 9543 bnx2x_cnic_ctl_send_bh(bp, &ctl);
7711 bnx2x_cnic_sp_post(bp, 1); 9544 bnx2x_cnic_sp_post(bp, 0);
7712} 9545}
7713 9546
7714static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) 9547static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
@@ -7725,8 +9558,8 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7725 break; 9558 break;
7726 } 9559 }
7727 9560
7728 case DRV_CTL_COMPLETION_CMD: { 9561 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
7729 int count = ctl->data.comp.comp_count; 9562 int count = ctl->data.credit.credit_count;
7730 9563
7731 bnx2x_cnic_sp_post(bp, count); 9564 bnx2x_cnic_sp_post(bp, count);
7732 break; 9565 break;
@@ -7736,8 +9569,24 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7736 case DRV_CTL_START_L2_CMD: { 9569 case DRV_CTL_START_L2_CMD: {
7737 u32 cli = ctl->data.ring.client_id; 9570 u32 cli = ctl->data.ring.client_id;
7738 9571
7739 bp->rx_mode_cl_mask |= (1 << cli); 9572 /* Set iSCSI MAC address */
7740 bnx2x_set_storm_rx_mode(bp); 9573 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9574
9575 mmiowb();
9576 barrier();
9577
9578 /* Start accepting on iSCSI L2 ring. Accept all multicasts
9579 * because it's the only way for UIO Client to accept
9580 * multicasts (in non-promiscuous mode only one Client per
9581 * function will receive multicast packets (leading in our
9582 * case).
9583 */
9584 bnx2x_rxq_set_mac_filters(bp, cli,
9585 BNX2X_ACCEPT_UNICAST |
9586 BNX2X_ACCEPT_BROADCAST |
9587 BNX2X_ACCEPT_ALL_MULTICAST);
9588 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9589
7741 break; 9590 break;
7742 } 9591 }
7743 9592
@@ -7745,8 +9594,23 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7745 case DRV_CTL_STOP_L2_CMD: { 9594 case DRV_CTL_STOP_L2_CMD: {
7746 u32 cli = ctl->data.ring.client_id; 9595 u32 cli = ctl->data.ring.client_id;
7747 9596
7748 bp->rx_mode_cl_mask &= ~(1 << cli); 9597 /* Stop accepting on iSCSI L2 ring */
7749 bnx2x_set_storm_rx_mode(bp); 9598 bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
9599 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
9600
9601 mmiowb();
9602 barrier();
9603
9604 /* Unset iSCSI L2 MAC */
9605 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9606 break;
9607 }
9608 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
9609 int count = ctl->data.credit.credit_count;
9610
9611 smp_mb__before_atomic_inc();
9612 atomic_add(count, &bp->spq_left);
9613 smp_mb__after_atomic_inc();
7750 break; 9614 break;
7751 } 9615 }
7752 9616
@@ -7770,10 +9634,16 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7770 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; 9634 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7771 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; 9635 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7772 } 9636 }
7773 cp->irq_arr[0].status_blk = bp->cnic_sb; 9637 if (CHIP_IS_E2(bp))
9638 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9639 else
9640 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9641
7774 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); 9642 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
9643 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
7775 cp->irq_arr[1].status_blk = bp->def_status_blk; 9644 cp->irq_arr[1].status_blk = bp->def_status_blk;
7776 cp->irq_arr[1].status_blk_num = DEF_SB_ID; 9645 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
9646 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
7777 9647
7778 cp->num_irq = 2; 9648 cp->num_irq = 2;
7779} 9649}
@@ -7805,12 +9675,10 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7805 9675
7806 cp->num_irq = 0; 9676 cp->num_irq = 0;
7807 cp->drv_state = CNIC_DRV_STATE_REGD; 9677 cp->drv_state = CNIC_DRV_STATE_REGD;
7808 9678 cp->iro_arr = bp->iro_arr;
7809 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7810 9679
7811 bnx2x_setup_cnic_irq_info(bp); 9680 bnx2x_setup_cnic_irq_info(bp);
7812 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 9681
7813 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7814 rcu_assign_pointer(bp->cnic_ops, ops); 9682 rcu_assign_pointer(bp->cnic_ops, ops);
7815 9683
7816 return 0; 9684 return 0;
@@ -7847,15 +9715,24 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
7847 cp->io_base = bp->regview; 9715 cp->io_base = bp->regview;
7848 cp->io_base2 = bp->doorbells; 9716 cp->io_base2 = bp->doorbells;
7849 cp->max_kwqe_pending = 8; 9717 cp->max_kwqe_pending = 8;
7850 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context); 9718 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
7851 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1; 9719 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9720 bnx2x_cid_ilt_lines(bp);
7852 cp->ctx_tbl_len = CNIC_ILT_LINES; 9721 cp->ctx_tbl_len = CNIC_ILT_LINES;
7853 cp->starting_cid = BCM_CNIC_CID_START; 9722 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
7854 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue; 9723 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
7855 cp->drv_ctl = bnx2x_drv_ctl; 9724 cp->drv_ctl = bnx2x_drv_ctl;
7856 cp->drv_register_cnic = bnx2x_register_cnic; 9725 cp->drv_register_cnic = bnx2x_register_cnic;
7857 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 9726 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
7858 9727 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9728 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9729
9730 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9731 "starting cid %d\n",
9732 cp->ctx_blk_size,
9733 cp->ctx_tbl_offset,
9734 cp->ctx_tbl_len,
9735 cp->starting_cid);
7859 return cp; 9736 return cp;
7860} 9737}
7861EXPORT_SYMBOL(bnx2x_cnic_probe); 9738EXPORT_SYMBOL(bnx2x_cnic_probe);
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 6be0d09ad3fd..1cefe489a955 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2009 Broadcom Corporation 3 * Copyright (c) 2007-2010 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -19,7 +19,20 @@
19 * 19 *
20 */ 20 */
21 21
22 22#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
23#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
24#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU (0x1<<5)
25#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1<<3)
26#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1<<4)
27#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1<<1)
28/* [RW 1] Initiate the ATC array - reset all the valid bits */
29#define ATC_REG_ATC_INIT_ARRAY 0x1100b8
30/* [R 1] ATC initalization done */
31#define ATC_REG_ATC_INIT_DONE 0x1100bc
32/* [RC 6] Interrupt register #0 read clear */
33#define ATC_REG_ATC_INT_STS_CLR 0x1101c0
34/* [RW 19] Interrupt mask register #0 read/write */
35#define BRB1_REG_BRB1_INT_MASK 0x60128
23/* [R 19] Interrupt register #0 read */ 36/* [R 19] Interrupt register #0 read */
24#define BRB1_REG_BRB1_INT_STS 0x6011c 37#define BRB1_REG_BRB1_INT_STS 0x6011c
25/* [RW 4] Parity mask register #0 read/write */ 38/* [RW 4] Parity mask register #0 read/write */
@@ -27,9 +40,31 @@
27/* [R 4] Parity register #0 read */ 40/* [R 4] Parity register #0 read */
28#define BRB1_REG_BRB1_PRTY_STS 0x6012c 41#define BRB1_REG_BRB1_PRTY_STS 0x6012c
29/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 42/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
30 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 43 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
31 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */ 44 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
45 * following reset the first rbc access to this reg must be write; there can
46 * be no more rbc writes after the first one; there can be any number of rbc
47 * read following the first write; rbc access not following these rules will
48 * result in hang condition. */
32#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200 49#define BRB1_REG_FREE_LIST_PRS_CRDT 0x60200
50/* [RW 10] The number of free blocks below which the full signal to class 0
51 * is asserted */
52#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0
53/* [RW 10] The number of free blocks above which the full signal to class 0
54 * is de-asserted */
55#define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4
56/* [RW 10] The number of free blocks below which the full signal to class 1
57 * is asserted */
58#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8
59/* [RW 10] The number of free blocks above which the full signal to class 1
60 * is de-asserted */
61#define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc
62/* [RW 10] The number of free blocks below which the full signal to the LB
63 * port is asserted */
64#define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0
65/* [RW 10] The number of free blocks above which the full signal to the LB
66 * port is de-asserted */
67#define BRB1_REG_FULL_LB_XON_THRESHOLD 0x601e4
33/* [RW 10] The number of free blocks above which the High_llfc signal to 68/* [RW 10] The number of free blocks above which the High_llfc signal to
34 interface #n is de-asserted. */ 69 interface #n is de-asserted. */
35#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c 70#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0 0x6014c
@@ -44,6 +79,9 @@
44/* [RW 10] The number of free blocks below which the Low_llfc signal to 79/* [RW 10] The number of free blocks below which the Low_llfc signal to
45 interface #n is asserted. */ 80 interface #n is asserted. */
46#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c 81#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c
82/* [RW 10] The number of blocks guarantied for the MAC port */
83#define BRB1_REG_MAC_GUARANTIED_0 0x601e8
84#define BRB1_REG_MAC_GUARANTIED_1 0x60240
47/* [R 24] The number of full blocks. */ 85/* [R 24] The number of full blocks. */
48#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090 86#define BRB1_REG_NUM_OF_FULL_BLOCKS 0x60090
49/* [ST 32] The number of cycles that the write_full signal towards MAC #0 87/* [ST 32] The number of cycles that the write_full signal towards MAC #0
@@ -55,7 +93,19 @@
55 asserted. */ 93 asserted. */
56#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8 94#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0 0x600b8
57#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc 95#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1 0x600bc
58/* [RW 10] Write client 0: De-assert pause threshold. */ 96/* [RW 10] The number of free blocks below which the pause signal to class 0
97 * is asserted */
98#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0
99/* [RW 10] The number of free blocks above which the pause signal to class 0
100 * is de-asserted */
101#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4
102/* [RW 10] The number of free blocks below which the pause signal to class 1
103 * is asserted */
104#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8
105/* [RW 10] The number of free blocks above which the pause signal to class 1
106 * is de-asserted */
107#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc
108/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
59#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 109#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
60#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c 110#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
61/* [RW 10] Write client 0: Assert pause threshold. */ 111/* [RW 10] Write client 0: Assert pause threshold. */
@@ -362,6 +412,7 @@
362#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 412#define CFC_REG_NUM_LCIDS_ARRIVING 0x104004
363/* [R 9] Number of Leaving LCIDs in Link List Block */ 413/* [R 9] Number of Leaving LCIDs in Link List Block */
364#define CFC_REG_NUM_LCIDS_LEAVING 0x104018 414#define CFC_REG_NUM_LCIDS_LEAVING 0x104018
415#define CFC_REG_WEAK_ENABLE_PF 0x104124
365/* [RW 8] The event id for aggregated interrupt 0 */ 416/* [RW 8] The event id for aggregated interrupt 0 */
366#define CSDM_REG_AGG_INT_EVENT_0 0xc2038 417#define CSDM_REG_AGG_INT_EVENT_0 0xc2038
367#define CSDM_REG_AGG_INT_EVENT_10 0xc2060 418#define CSDM_REG_AGG_INT_EVENT_10 0xc2060
@@ -590,10 +641,17 @@
590#define CSEM_REG_TS_8_AS 0x200058 641#define CSEM_REG_TS_8_AS 0x200058
591/* [RW 3] The arbitration scheme of time_slot 9 */ 642/* [RW 3] The arbitration scheme of time_slot 9 */
592#define CSEM_REG_TS_9_AS 0x20005c 643#define CSEM_REG_TS_9_AS 0x20005c
644/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
645 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
646#define CSEM_REG_VFPF_ERR_NUM 0x200380
593/* [RW 1] Parity mask register #0 read/write */ 647/* [RW 1] Parity mask register #0 read/write */
594#define DBG_REG_DBG_PRTY_MASK 0xc0a8 648#define DBG_REG_DBG_PRTY_MASK 0xc0a8
595/* [R 1] Parity register #0 read */ 649/* [R 1] Parity register #0 read */
596#define DBG_REG_DBG_PRTY_STS 0xc09c 650#define DBG_REG_DBG_PRTY_STS 0xc09c
651/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
652 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
653 * 4.Completion function=0; 5.Error handling=0 */
654#define DMAE_REG_BACKWARD_COMP_EN 0x10207c
597/* [RW 32] Commands memory. The address to command X; row Y is to calculated 655/* [RW 32] Commands memory. The address to command X; row Y is to calculated
598 as 14*X+Y. */ 656 as 14*X+Y. */
599#define DMAE_REG_CMD_MEM 0x102400 657#define DMAE_REG_CMD_MEM 0x102400
@@ -742,9 +800,13 @@
742#define HC_REG_HC_PRTY_MASK 0x1080a0 800#define HC_REG_HC_PRTY_MASK 0x1080a0
743/* [R 3] Parity register #0 read */ 801/* [R 3] Parity register #0 read */
744#define HC_REG_HC_PRTY_STS 0x108094 802#define HC_REG_HC_PRTY_STS 0x108094
745#define HC_REG_INT_MASK 0x108108 803/* [RC 3] Parity register #0 read clear */
804#define HC_REG_HC_PRTY_STS_CLR 0x108098
805#define HC_REG_INT_MASK 0x108108
746#define HC_REG_LEADING_EDGE_0 0x108040 806#define HC_REG_LEADING_EDGE_0 0x108040
747#define HC_REG_LEADING_EDGE_1 0x108048 807#define HC_REG_LEADING_EDGE_1 0x108048
808#define HC_REG_MAIN_MEMORY 0x108800
809#define HC_REG_MAIN_MEMORY_SIZE 152
748#define HC_REG_P0_PROD_CONS 0x108200 810#define HC_REG_P0_PROD_CONS 0x108200
749#define HC_REG_P1_PROD_CONS 0x108400 811#define HC_REG_P1_PROD_CONS 0x108400
750#define HC_REG_PBA_COMMAND 0x108140 812#define HC_REG_PBA_COMMAND 0x108140
@@ -758,6 +820,92 @@
758#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068 820#define HC_REG_USTORM_ADDR_FOR_COALESCE 0x108068
759#define HC_REG_VQID_0 0x108008 821#define HC_REG_VQID_0 0x108008
760#define HC_REG_VQID_1 0x10800c 822#define HC_REG_VQID_1 0x10800c
823#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1)
824#define IGU_REG_ATTENTION_ACK_BITS 0x130108
825/* [R 4] Debug: attn_fsm */
826#define IGU_REG_ATTN_FSM 0x130054
827#define IGU_REG_ATTN_MSG_ADDR_H 0x13011c
828#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
829/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
830 * 1-pending). [2:0] = PFID. Pending means attention message was sent; but
831 * write done didnt receive. */
832#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
833#define IGU_REG_BLOCK_CONFIGURATION 0x130000
834#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
835#define IGU_REG_COMMAND_REG_CTRL 0x13012c
836/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
837 * is clear. The bits in this registers are set and clear via the producer
838 * command. Data valid only in addresses 0-4. all the rest are zero. */
839#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
840/* [R 5] Debug: ctrl_fsm */
841#define IGU_REG_CTRL_FSM 0x130064
842/* [R 1] data availble for error memory. If this bit is clear do not red
843 * from error_handling_memory. */
844#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
845/* [R 11] Parity register #0 read */
846#define IGU_REG_IGU_PRTY_STS 0x13009c
847/* [R 4] Debug: int_handle_fsm */
848#define IGU_REG_INT_HANDLE_FSM 0x130050
849#define IGU_REG_LEADING_EDGE_LATCH 0x130134
850/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
851 * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
852 * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
853#define IGU_REG_MAPPING_MEMORY 0x131000
854#define IGU_REG_MAPPING_MEMORY_SIZE 136
855#define IGU_REG_PBA_STATUS_LSB 0x130138
856#define IGU_REG_PBA_STATUS_MSB 0x13013c
857#define IGU_REG_PCI_PF_MSI_EN 0x130140
858#define IGU_REG_PCI_PF_MSIX_EN 0x130144
859#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
860/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
861 * pending; 1 = pending. Pendings means interrupt was asserted; and write
862 * done was not received. Data valid only in addresses 0-4. all the rest are
863 * zero. */
864#define IGU_REG_PENDING_BITS_STATUS 0x130300
865#define IGU_REG_PF_CONFIGURATION 0x130154
866/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
867 * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
868 * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
869 * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
870 * - In backward compatible mode; for non default SB; each even line in the
871 * memory holds the U producer and each odd line hold the C producer. The
872 * first 128 producer are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
873 * last 20 producers are for the DSB for each PF. each PF has five segments
874 * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
875 * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
876#define IGU_REG_PROD_CONS_MEMORY 0x132000
877/* [R 3] Debug: pxp_arb_fsm */
878#define IGU_REG_PXP_ARB_FSM 0x130068
879/* [RW 6] Write one for each bit will reset the appropriate memory. When the
880 * memory reset finished the appropriate bit will be clear. Bit 0 - mapping
881 * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
882 * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
883#define IGU_REG_RESET_MEMORIES 0x130158
884/* [R 4] Debug: sb_ctrl_fsm */
885#define IGU_REG_SB_CTRL_FSM 0x13004c
886#define IGU_REG_SB_INT_BEFORE_MASK_LSB 0x13015c
887#define IGU_REG_SB_INT_BEFORE_MASK_MSB 0x130160
888#define IGU_REG_SB_MASK_LSB 0x130164
889#define IGU_REG_SB_MASK_MSB 0x130168
890/* [RW 16] Number of command that were dropped without causing an interrupt
891 * due to: read access for WO BAR address; or write access for RO BAR
892 * address or any access for reserved address or PCI function error is set
893 * and address is not MSIX; PBA or cleanup */
894#define IGU_REG_SILENT_DROP 0x13016c
895/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
896 * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
897 * PF; 68-71 number of ATTN messages per PF */
898#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT 0x130800
899/* [RW 32] Number of cycles the timer mask masking the IGU interrupt when a
900 * timer mask command arrives. Value must be bigger than 100. */
901#define IGU_REG_TIMER_MASKING_VALUE 0x13003c
902#define IGU_REG_TRAILING_EDGE_LATCH 0x130104
903#define IGU_REG_VF_CONFIGURATION 0x130170
904/* [WB_R 32] Each bit represent write done pending bits status for that SB
905 * (MSI/MSIX message was sent and write done was not received yet). 0 =
906 * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
907#define IGU_REG_WRITE_DONE_PENDING 0x130480
908#define MCP_A_REG_MCPR_SCRATCH 0x3a0000
761#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 909#define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424
762#define MCP_REG_MCPR_NVM_ADDR 0x8640c 910#define MCP_REG_MCPR_NVM_ADDR 0x8640c
763#define MCP_REG_MCPR_NVM_CFG4 0x8642c 911#define MCP_REG_MCPR_NVM_CFG4 0x8642c
@@ -880,6 +1028,11 @@
880 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched 1028 rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
881 ump_tx_parity; [31] MCP Latched scpad_parity; */ 1029 ump_tx_parity; [31] MCP Latched scpad_parity; */
882#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458 1030#define MISC_REG_AEU_AFTER_INVERT_4_MCP 0xa458
1031/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
1032 * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
1033 * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
1034 * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
1035#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 0xa700
883/* [W 14] write to this register results with the clear of the latched 1036/* [W 14] write to this register results with the clear of the latched
884 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in 1037 signals; one in d0 clears RBCR latch; one in d1 clears RBCT latch; one in
885 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP 1038 d2 clears RBCN latch; one in d3 clears RBCU latch; one in d4 clears RBCP
@@ -1251,6 +1404,7 @@
1251#define MISC_REG_E1HMF_MODE 0xa5f8 1404#define MISC_REG_E1HMF_MODE 0xa5f8
1252/* [RW 32] Debug only: spare RW register reset by core reset */ 1405/* [RW 32] Debug only: spare RW register reset by core reset */
1253#define MISC_REG_GENERIC_CR_0 0xa460 1406#define MISC_REG_GENERIC_CR_0 0xa460
1407#define MISC_REG_GENERIC_CR_1 0xa464
1254/* [RW 32] Debug only: spare RW register reset by por reset */ 1408/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474 1409#define MISC_REG_GENERIC_POR_1 0xa474
1256/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of 1410/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
@@ -1373,6 +1527,14 @@
1373#define MISC_REG_PLL_STORM_CTRL_2 0xa298 1527#define MISC_REG_PLL_STORM_CTRL_2 0xa298
1374#define MISC_REG_PLL_STORM_CTRL_3 0xa29c 1528#define MISC_REG_PLL_STORM_CTRL_3 0xa29c
1375#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0 1529#define MISC_REG_PLL_STORM_CTRL_4 0xa2a0
1530/* [R 1] Status of 4 port mode enable input pin. */
1531#define MISC_REG_PORT4MODE_EN 0xa750
1532/* [RW 2] 4 port mode enable overwrite.[0] - Overwrite control; if it is 0 -
1533 * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
1534 * the port4mode_en output is equal to bit[1] of this register; [1] -
1535 * Overwrite value. If bit[0] of this register is 1 this is the value that
1536 * receives the port4mode_en output . */
1537#define MISC_REG_PORT4MODE_EN_OVWR 0xa720
1376/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset; 1538/* [RW 32] reset reg#2; rite/read one = the specific block is out of reset;
1377 write/read zero = the specific block is in reset; addr 0-wr- the write 1539 write/read zero = the specific block is in reset; addr 0-wr- the write
1378 value will be written to the register; addr 1-set - one will be written 1540 value will be written to the register; addr 1-set - one will be written
@@ -1656,8 +1818,91 @@
1656/* [R 32] Interrupt register #0 read */ 1818/* [R 32] Interrupt register #0 read */
1657#define NIG_REG_NIG_INT_STS_0 0x103b0 1819#define NIG_REG_NIG_INT_STS_0 0x103b0
1658#define NIG_REG_NIG_INT_STS_1 0x103c0 1820#define NIG_REG_NIG_INT_STS_1 0x103c0
1659/* [R 32] Parity register #0 read */ 1821/* [R 32] Legacy E1 and E1H location for parity error status register. */
1660#define NIG_REG_NIG_PRTY_STS 0x103d0 1822#define NIG_REG_NIG_PRTY_STS 0x103d0
1823/* [R 32] Parity register #0 read */
1824#define NIG_REG_NIG_PRTY_STS_0 0x183bc
1825#define NIG_REG_NIG_PRTY_STS_1 0x183cc
1826/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
1827 * Ethernet header. */
1828#define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038
1829/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
1830 * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
1831 * disabled when this bit is set. */
1832#define NIG_REG_P0_HWPFC_ENABLE 0x18078
1833#define NIG_REG_P0_LLH_FUNC_MEM2 0x18480
1834#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440
1835/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
1836 * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
1837 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
1838 * priority field is extracted from the outer-most VLAN in receive packet.
1839 * Only COS 0 and COS 1 are supported in E2. */
1840#define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054
1841/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
1842 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
1843 * than one bit may be set; allowing multiple priorities to be mapped to one
1844 * COS. */
1845#define NIG_REG_P0_RX_COS0_PRIORITY_MASK 0x18058
1846/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
1847 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
1848 * than one bit may be set; allowing multiple priorities to be mapped to one
1849 * COS. */
1850#define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c
1851/* [RW 15] Specify which of the credit registers the client is to be mapped
1852 * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
1853 * clients that are not subject to WFQ credit blocking - their
1854 * specifications here are not used. */
1855#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0
1856/* [RW 5] Specify whether the client competes directly in the strict
1857 * priority arbiter. The bits are mapped according to client ID (client IDs
1858 * are defined in tx_arb_priority_client). Default value is set to enable
1859 * strict priorities for clients 0-2 -- management and debug traffic. */
1860#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT 0x180e8
1861/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
1862 * bits are mapped according to client ID (client IDs are defined in
1863 * tx_arb_priority_client). Default value is 0 for not using WFQ credit
1864 * blocking. */
1865#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x180ec
1866/* [RW 32] Specify the upper bound that credit register 0 is allowed to
1867 * reach. */
1868#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c
1869#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110
1870/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
1871 * when it is time to increment. */
1872#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8
1873#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc
1874/* [RW 12] Specify the number of strict priority arbitration slots between
1875 * two round-robin arbitration slots to avoid starvation. A value of 0 means
1876 * no strict priority cycles - the strict priority with anti-starvation
1877 * arbiter becomes a round-robin arbiter. */
1878#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS 0x180f4
1879/* [RW 15] Specify the client number to be assigned to each priority of the
1880 * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
1881 * are for priority 0 client; bits [14:12] are for priority 4 client. The
1882 * clients are assigned the following IDs: 0-management; 1-debug traffic
1883 * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
1884 * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
1885 * for management at priority 0; debug traffic at priorities 1 and 2; COS0
1886 * traffic at priority 3; and COS1 traffic at priority 4. */
1887#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4
1888#define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0
1889#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460
1890/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
1891 * future expansion) each priorty is to be mapped to. Bits 3:0 specify the
1892 * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
1893 * priority field is extracted from the outer-most VLAN in receive packet.
1894 * Only COS 0 and COS 1 are supported in E2. */
1895#define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8
1896/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
1897 * priority is mapped to COS 0 when the corresponding mask bit is 1. More
1898 * than one bit may be set; allowing multiple priorities to be mapped to one
1899 * COS. */
1900#define NIG_REG_P1_RX_COS0_PRIORITY_MASK 0x181ac
1901/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
1902 * priority is mapped to COS 1 when the corresponding mask bit is 1. More
1903 * than one bit may be set; allowing multiple priorities to be mapped to one
1904 * COS. */
1905#define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0
1661/* [RW 1] Pause enable for port0. This register may get 1 only when 1906/* [RW 1] Pause enable for port0. This register may get 1 only when
1662 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same 1907 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
1663 port */ 1908 port */
@@ -1742,6 +1987,10 @@
1742/* [RW 1] Disable processing further tasks from port 4 (after ending the 1987/* [RW 1] Disable processing further tasks from port 4 (after ending the
1743 current task in process). */ 1988 current task in process). */
1744#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c 1989#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
1990#define PBF_REG_DISABLE_PF 0x1402e8
1991/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
1992 * Ethernet header. */
1993#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
1745#define PBF_REG_IF_ENABLE_REG 0x140044 1994#define PBF_REG_IF_ENABLE_REG 0x140044
1746/* [RW 1] Init bit. When set the initial credits are copied to the credit 1995/* [RW 1] Init bit. When set the initial credits are copied to the credit
1747 registers (except the port credits). Should be set and then reset after 1996 registers (except the port credits). Should be set and then reset after
@@ -1765,6 +2014,8 @@
1765#define PBF_REG_MAC_IF1_ENABLE 0x140034 2014#define PBF_REG_MAC_IF1_ENABLE 0x140034
1766/* [RW 1] Enable for the loopback interface. */ 2015/* [RW 1] Enable for the loopback interface. */
1767#define PBF_REG_MAC_LB_ENABLE 0x140040 2016#define PBF_REG_MAC_LB_ENABLE 0x140040
2017/* [RW 6] Bit-map indicating which headers must appear in the packet */
2018#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
1768/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause 2019/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
1769 not suppoterd. */ 2020 not suppoterd. */
1770#define PBF_REG_P0_ARB_THRSH 0x1400e4 2021#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -1804,6 +2055,259 @@
1804#define PB_REG_PB_PRTY_MASK 0x38 2055#define PB_REG_PB_PRTY_MASK 0x38
1805/* [R 4] Parity register #0 read */ 2056/* [R 4] Parity register #0 read */
1806#define PB_REG_PB_PRTY_STS 0x2c 2057#define PB_REG_PB_PRTY_STS 0x2c
2058#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2059#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2060#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
2061#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN (0x1<<6)
2062#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN (0x1<<7)
2063#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN (0x1<<4)
2064#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN (0x1<<3)
2065#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN (0x1<<5)
2066#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN (0x1<<2)
2067/* [R 8] Config space A attention dirty bits. Each bit indicates that the
2068 * corresponding PF generates config space A attention. Set by PXP. Reset by
2069 * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
2070 * from both paths. */
2071#define PGLUE_B_REG_CFG_SPACE_A_REQUEST 0x9010
2072/* [R 8] Config space B attention dirty bits. Each bit indicates that the
2073 * corresponding PF generates config space B attention. Set by PXP. Reset by
2074 * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
2075 * from both paths. */
2076#define PGLUE_B_REG_CFG_SPACE_B_REQUEST 0x9014
2077/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
2078 * - enable. */
2079#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE 0x9194
2080/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
2081 * its[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
2082#define PGLUE_B_REG_CSDM_INB_INT_B_VF 0x916c
2083/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
2084 * - enable. */
2085#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE 0x919c
2086/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
2087#define PGLUE_B_REG_CSDM_START_OFFSET_A 0x9100
2088/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
2089#define PGLUE_B_REG_CSDM_START_OFFSET_B 0x9108
2090/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
2091#define PGLUE_B_REG_CSDM_VF_SHIFT_B 0x9110
2092/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2093#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF 0x91ac
2094/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
2095 * that the FLR register of the corresponding PF was set. Set by PXP. Reset
2096 * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
2097 * from both paths. */
2098#define PGLUE_B_REG_FLR_REQUEST_PF_7_0 0x9028
2099/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
2100 * to a bit in this register in order to clear the corresponding bit in
2101 * flr_request_pf_7_0 register. Note: register contains bits from both
2102 * paths. */
2103#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR 0x9418
2104/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
2105 * indicates that the FLR register of the corresponding VF was set. Set by
2106 * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
2107#define PGLUE_B_REG_FLR_REQUEST_VF_127_96 0x9024
2108/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
2109 * indicates that the FLR register of the corresponding VF was set. Set by
2110 * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
2111#define PGLUE_B_REG_FLR_REQUEST_VF_31_0 0x9018
2112/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
2113 * indicates that the FLR register of the corresponding VF was set. Set by
2114 * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
2115#define PGLUE_B_REG_FLR_REQUEST_VF_63_32 0x901c
2116/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
2117 * indicates that the FLR register of the corresponding VF was set. Set by
2118 * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
2119#define PGLUE_B_REG_FLR_REQUEST_VF_95_64 0x9020
2120/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
2121 * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
2122 * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
2123 * arrived with a correctable error. Bit 3 - Configuration RW arrived with
2124 * an uncorrectable error. Bit 4 - Completion with Configuration Request
2125 * Retry Status. Bit 5 - Expansion ROM access received with a write request.
2126 * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
2127 * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
2128 * and pcie_rx_last not asserted. */
2129#define PGLUE_B_REG_INCORRECT_RCV_DETAILS 0x9068
2130#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER 0x942c
2131#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ 0x9430
2132#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE 0x9434
2133#define PGLUE_B_REG_INTERNAL_VFID_ENABLE 0x9438
2134/* [R 9] Interrupt register #0 read */
2135#define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298
2136/* [RC 9] Interrupt register #0 read clear */
2137#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c
2138/* [R 2] Parity register #0 read */
2139#define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8
2140/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
2141 * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
2142 * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
2143 * completer abort. 3 - Illegal value for this field. [12] valid - indicates
2144 * if there was a completion error since the last time this register was
2145 * cleared. */
2146#define PGLUE_B_REG_RX_ERR_DETAILS 0x9080
2147/* [R 18] Details of first ATS Translation Completion request received with
2148 * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
2149 * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
2150 * unsupported request. 2 - completer abort. 3 - Illegal value for this
2151 * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
2152 * completion error since the last time this register was cleared. */
2153#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS 0x9084
2154/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
2155 * a bit in this register in order to clear the corresponding bit in
2156 * shadow_bme_pf_7_0 register. MCP should never use this unless a
2157 * work-around is needed. Note: register contains bits from both paths. */
2158#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR 0x9458
2159/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
2160 * VF enable register of the corresponding PF is written to 0 and was
2161 * previously 1. Set by PXP. Reset by MCP writing 1 to
2162 * sr_iov_disabled_request_clr. Note: register contains bits from both
2163 * paths. */
2164#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST 0x9030
2165/* [R 32] Indicates the status of tags 32-63. 0 - tags is used - read
2166 * completion did not return yet. 1 - tag is unused. Same functionality as
2167 * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
2168#define PGLUE_B_REG_TAGS_63_32 0x9244
2169/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
2170 * - enable. */
2171#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE 0x9170
2172/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
2173#define PGLUE_B_REG_TSDM_START_OFFSET_A 0x90c4
2174/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
2175#define PGLUE_B_REG_TSDM_START_OFFSET_B 0x90cc
2176/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
2177#define PGLUE_B_REG_TSDM_VF_SHIFT_B 0x90d4
2178/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2179#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF 0x91a0
2180/* [R 32] Address [31:0] of first read request not submitted due to error */
2181#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x9098
2182/* [R 32] Address [63:32] of first read request not submitted due to error */
2183#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x909c
2184/* [R 31] Details of first read request not submitted due to error. [4:0]
2185 * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
2186 * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
2187 * VFID. */
2188#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x90a0
2189/* [R 26] Details of first read request not submitted due to error. [15:0]
2190 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2191 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2192 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2193 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2194 * indicates if there was a request not submitted due to error since the
2195 * last time this register was cleared. */
2196#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x90a4
2197/* [R 32] Address [31:0] of first write request not submitted due to error */
2198#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x9088
2199/* [R 32] Address [63:32] of first write request not submitted due to error */
2200#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x908c
2201/* [R 31] Details of first write request not submitted due to error. [4:0]
2202 * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
2203 * - VFID. */
2204#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x9090
2205/* [R 26] Details of first write request not submitted due to error. [15:0]
2206 * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
2207 * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
2208 * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
2209 * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
2210 * indicates if there was a request not submitted due to error since the
2211 * last time this register was cleared. */
2212#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x9094
2213/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
2214 * its[4:0]-address relative to start_offset_a. Bits [1:0] can have any
2215 * value (Byte resolution address). */
2216#define PGLUE_B_REG_USDM_INB_INT_A_0 0x9128
2217#define PGLUE_B_REG_USDM_INB_INT_A_1 0x912c
2218#define PGLUE_B_REG_USDM_INB_INT_A_2 0x9130
2219#define PGLUE_B_REG_USDM_INB_INT_A_3 0x9134
2220#define PGLUE_B_REG_USDM_INB_INT_A_4 0x9138
2221#define PGLUE_B_REG_USDM_INB_INT_A_5 0x913c
2222#define PGLUE_B_REG_USDM_INB_INT_A_6 0x9140
2223/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
2224 * - enable. */
2225#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE 0x917c
2226/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
2227 * - enable. */
2228#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE 0x9180
2229/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
2230 * - enable. */
2231#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE 0x9184
2232/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
2233#define PGLUE_B_REG_USDM_START_OFFSET_A 0x90d8
2234/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
2235#define PGLUE_B_REG_USDM_START_OFFSET_B 0x90e0
2236/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
2237#define PGLUE_B_REG_USDM_VF_SHIFT_B 0x90e8
2238/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2239#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF 0x91a4
2240/* [R 26] Details of first target VF request accessing VF GRC space that
2241 * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
2242 * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
2243 * request accessing VF GRC space that failed permission check since the
2244 * last time this register was cleared. Permission checks are: function
2245 * permission; R/W permission; address range permission. */
2246#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS 0x9234
2247/* [R 31] Details of first target VF request with length violation (too many
2248 * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
2249 * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
2250 * valid - indicates if there was a request with length violation since the
2251 * last time this register was cleared. Length violations: length of more
2252 * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
2253 * length is more than 1 DW. */
2254#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS 0x9230
2255/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
2256 * that there was a completion with uncorrectable error for the
2257 * corresponding PF. Set by PXP. Reset by MCP writing 1 to
2258 * was_error_pf_7_0_clr. */
2259#define PGLUE_B_REG_WAS_ERROR_PF_7_0 0x907c
2260/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
2261 * to a bit in this register in order to clear the corresponding bit in
2262 * flr_request_pf_7_0 register. */
2263#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR 0x9470
2264/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
2265 * indicates that there was a completion with uncorrectable error for the
2266 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2267 * was_error_vf_127_96_clr. */
2268#define PGLUE_B_REG_WAS_ERROR_VF_127_96 0x9078
2269/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
2270 * writes 1 to a bit in this register in order to clear the corresponding
2271 * bit in was_error_vf_127_96 register. */
2272#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR 0x9474
2273/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
2274 * indicates that there was a completion with uncorrectable error for the
2275 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2276 * was_error_vf_31_0_clr. */
2277#define PGLUE_B_REG_WAS_ERROR_VF_31_0 0x906c
2278/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
2279 * 1 to a bit in this register in order to clear the corresponding bit in
2280 * was_error_vf_31_0 register. */
2281#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x9478
2282/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
2283 * indicates that there was a completion with uncorrectable error for the
2284 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2285 * was_error_vf_63_32_clr. */
2286#define PGLUE_B_REG_WAS_ERROR_VF_63_32 0x9070
2287/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
2288 * 1 to a bit in this register in order to clear the corresponding bit in
2289 * was_error_vf_63_32 register. */
2290#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR 0x947c
2291/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
2292 * indicates that there was a completion with uncorrectable error for the
2293 * corresponding VF. Set by PXP. Reset by MCP writing 1 to
2294 * was_error_vf_95_64_clr. */
2295#define PGLUE_B_REG_WAS_ERROR_VF_95_64 0x9074
2296/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
2297 * 1 to a bit in this register in order to clear the corresponding bit in
2298 * was_error_vf_95_64 register. */
2299#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR 0x9480
2300/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
2301 * - enable. */
2302#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE 0x9188
2303/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
2304#define PGLUE_B_REG_XSDM_START_OFFSET_A 0x90ec
2305/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
2306#define PGLUE_B_REG_XSDM_START_OFFSET_B 0x90f4
2307/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
2308#define PGLUE_B_REG_XSDM_VF_SHIFT_B 0x90fc
2309/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
2310#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF 0x91a8
1807#define PRS_REG_A_PRSU_20 0x40134 2311#define PRS_REG_A_PRSU_20 0x40134
1808/* [R 8] debug only: CFC load request current credit. Transaction based. */ 2312/* [R 8] debug only: CFC load request current credit. Transaction based. */
1809#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 2313#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
@@ -1866,9 +2370,13 @@
1866#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018 2370#define PRS_REG_FLUSH_REGIONS_TYPE_5 0x40018
1867#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c 2371#define PRS_REG_FLUSH_REGIONS_TYPE_6 0x4001c
1868#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020 2372#define PRS_REG_FLUSH_REGIONS_TYPE_7 0x40020
2373/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
2374 * Ethernet header. */
2375#define PRS_REG_HDRS_AFTER_BASIC 0x40238
1869/* [RW 4] The increment value to send in the CFC load request message */ 2376/* [RW 4] The increment value to send in the CFC load request message */
1870#define PRS_REG_INC_VALUE 0x40048 2377#define PRS_REG_INC_VALUE 0x40048
1871/* [RW 1] If set indicates not to send messages to CFC on received packets */ 2378/* [RW 6] Bit-map indicating which headers must appear in the packet */
2379#define PRS_REG_MUST_HAVE_HDRS 0x40254
1872#define PRS_REG_NIC_MODE 0x40138 2380#define PRS_REG_NIC_MODE 0x40138
1873/* [RW 8] The 8-bit event ID for cases where there is no match on the 2381/* [RW 8] The 8-bit event ID for cases where there is no match on the
1874 connection. Used in packet start message to TCM. */ 2382 connection. Used in packet start message to TCM. */
@@ -1919,6 +2427,13 @@
1919#define PRS_REG_TCM_CURRENT_CREDIT 0x40160 2427#define PRS_REG_TCM_CURRENT_CREDIT 0x40160
1920/* [R 8] debug only: TSDM current credit. Transaction based. */ 2428/* [R 8] debug only: TSDM current credit. Transaction based. */
1921#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c 2429#define PRS_REG_TSDM_CURRENT_CREDIT 0x4015c
2430#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT (0x1<<19)
2431#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF (0x1<<20)
2432#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN (0x1<<22)
2433#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED (0x1<<23)
2434#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED (0x1<<24)
2435#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
2436#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR (0x1<<7)
1922/* [R 6] Debug only: Number of used entries in the data FIFO */ 2437/* [R 6] Debug only: Number of used entries in the data FIFO */
1923#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c 2438#define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c
1924/* [R 7] Debug only: Number of used entries in the header FIFO */ 2439/* [R 7] Debug only: Number of used entries in the header FIFO */
@@ -2244,8 +2759,17 @@
2244/* [RW 1] When '1'; requests will enter input buffers but wont get out 2759/* [RW 1] When '1'; requests will enter input buffers but wont get out
2245 towards the glue */ 2760 towards the glue */
2246#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330 2761#define PXP2_REG_RQ_DISABLE_INPUTS 0x120330
2247/* [RW 1] 1 - SR will be aligned by 64B; 0 - SR will be aligned by 8B */ 2762/* [RW 4] Determines alignment of write SRs when a request is split into
2763 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
2764 * aligned. 4 - 512B aligned. */
2248#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0 2765#define PXP2_REG_RQ_DRAM_ALIGN 0x1205b0
2766/* [RW 4] Determines alignment of read SRs when a request is split into
2767 * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
2768 * aligned. 4 - 512B aligned. */
2769#define PXP2_REG_RQ_DRAM_ALIGN_RD 0x12092c
2770/* [RW 1] when set the new alignment method (E2) will be applied; when reset
2771 * the original alignment method (E1 E1H) will be applied */
2772#define PXP2_REG_RQ_DRAM_ALIGN_SEL 0x120930
2249/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will 2773/* [RW 1] If 1 ILT failiue will not result in ELT access; An interrupt will
2250 be asserted */ 2774 be asserted */
2251#define PXP2_REG_RQ_ELT_DISABLE 0x12066c 2775#define PXP2_REG_RQ_ELT_DISABLE 0x12066c
@@ -2436,7 +2960,8 @@
2436#define PXP_REG_PXP_INT_STS_1 0x103078 2960#define PXP_REG_PXP_INT_STS_1 0x103078
2437/* [RC 32] Interrupt register #0 read clear */ 2961/* [RC 32] Interrupt register #0 read clear */
2438#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c 2962#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
2439/* [RW 26] Parity mask register #0 read/write */ 2963#define PXP_REG_PXP_INT_STS_CLR_1 0x10307c
2964/* [RW 27] Parity mask register #0 read/write */
2440#define PXP_REG_PXP_PRTY_MASK 0x103094 2965#define PXP_REG_PXP_PRTY_MASK 0x103094
2441/* [R 26] Parity register #0 read */ 2966/* [R 26] Parity register #0 read */
2442#define PXP_REG_PXP_PRTY_STS 0x103088 2967#define PXP_REG_PXP_PRTY_STS 0x103088
@@ -2566,6 +3091,7 @@
2566#define QM_REG_PAUSESTATE7 0x16e698 3091#define QM_REG_PAUSESTATE7 0x16e698
2567/* [RW 2] The PCI attributes field used in the PCI request. */ 3092/* [RW 2] The PCI attributes field used in the PCI request. */
2568#define QM_REG_PCIREQAT 0x168054 3093#define QM_REG_PCIREQAT 0x168054
3094#define QM_REG_PF_EN 0x16e70c
2569/* [R 16] The byte credit of port 0 */ 3095/* [R 16] The byte credit of port 0 */
2570#define QM_REG_PORT0BYTECRD 0x168300 3096#define QM_REG_PORT0BYTECRD 0x168300
2571/* [R 16] The byte credit of port 1 */ 3097/* [R 16] The byte credit of port 1 */
@@ -3402,6 +3928,14 @@
3402/* [R 32] Parity register #0 read */ 3928/* [R 32] Parity register #0 read */
3403#define TSEM_REG_TSEM_PRTY_STS_0 0x180114 3929#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
3404#define TSEM_REG_TSEM_PRTY_STS_1 0x180124 3930#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
3931/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
3932 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
3933#define TSEM_REG_VFPF_ERR_NUM 0x180380
3934/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
3935 * [10:8] of the address should be the offset within the accessed LCID
3936 * context; the bits [7:0] are the accessed LCID.Example: to write to REG10
3937 * LCID100. The RBC address should be 12'ha64. */
3938#define UCM_REG_AG_CTX 0xe2000
3405/* [R 5] Used to read the XX protection CAM occupancy counter. */ 3939/* [R 5] Used to read the XX protection CAM occupancy counter. */
3406#define UCM_REG_CAM_OCCUP 0xe0170 3940#define UCM_REG_CAM_OCCUP 0xe0170
3407/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3941/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3851,6 +4385,17 @@
3851/* [R 32] Parity register #0 read */ 4385/* [R 32] Parity register #0 read */
3852#define USEM_REG_USEM_PRTY_STS_0 0x300124 4386#define USEM_REG_USEM_PRTY_STS_0 0x300124
3853#define USEM_REG_USEM_PRTY_STS_1 0x300134 4387#define USEM_REG_USEM_PRTY_STS_1 0x300134
4388/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4389 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4390#define USEM_REG_VFPF_ERR_NUM 0x300380
4391#define VFC_MEMORIES_RST_REG_CAM_RST (0x1<<0)
4392#define VFC_MEMORIES_RST_REG_RAM_RST (0x1<<1)
4393#define VFC_REG_MEMORIES_RST 0x1943c
4394/* [RW 32] Indirect access to AG context with 32-bits granularity. The bits
4395 * [12:8] of the address should be the offset within the accessed LCID
4396 * context; the bits [7:0] are the accessed LCID.Example: to write to REG10
4397 * LCID100. The RBC address should be 13'ha64. */
4398#define XCM_REG_AG_CTX 0x28000
3854/* [RW 2] The queue index for registration on Aux1 counter flag. */ 4399/* [RW 2] The queue index for registration on Aux1 counter flag. */
3855#define XCM_REG_AUX1_Q 0x20134 4400#define XCM_REG_AUX1_Q 0x20134
3856/* [RW 2] Per each decision rule the queue index to register to. */ 4401/* [RW 2] Per each decision rule the queue index to register to. */
@@ -4333,6 +4878,9 @@
4333#define XSEM_REG_TS_8_AS 0x280058 4878#define XSEM_REG_TS_8_AS 0x280058
4334/* [RW 3] The arbitration scheme of time_slot 9 */ 4879/* [RW 3] The arbitration scheme of time_slot 9 */
4335#define XSEM_REG_TS_9_AS 0x28005c 4880#define XSEM_REG_TS_9_AS 0x28005c
4881/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4882 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4883#define XSEM_REG_VFPF_ERR_NUM 0x280380
4336/* [RW 32] Interrupt mask register #0 read/write */ 4884/* [RW 32] Interrupt mask register #0 read/write */
4337#define XSEM_REG_XSEM_INT_MASK_0 0x280110 4885#define XSEM_REG_XSEM_INT_MASK_0 0x280110
4338#define XSEM_REG_XSEM_INT_MASK_1 0x280120 4886#define XSEM_REG_XSEM_INT_MASK_1 0x280120
@@ -4371,6 +4919,23 @@
4371#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3) 4919#define BIGMAC_REGISTER_TX_SOURCE_ADDR (0x08<<3)
4372#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3) 4920#define BIGMAC_REGISTER_TX_STAT_GTBYT (0x20<<3)
4373#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3) 4921#define BIGMAC_REGISTER_TX_STAT_GTPKT (0x0C<<3)
4922#define BIGMAC2_REGISTER_BMAC_CONTROL (0x00<<3)
4923#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL (0x01<<3)
4924#define BIGMAC2_REGISTER_CNT_MAX_SIZE (0x05<<3)
4925#define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3)
4926#define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3)
4927#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3)
4928#define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3)
4929#define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3)
4930#define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3)
4931#define BIGMAC2_REGISTER_RX_STAT_GRPP (0x51<<3)
4932#define BIGMAC2_REGISTER_TX_CONTROL (0x1C<<3)
4933#define BIGMAC2_REGISTER_TX_MAX_SIZE (0x1E<<3)
4934#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL (0x20<<3)
4935#define BIGMAC2_REGISTER_TX_SOURCE_ADDR (0x1D<<3)
4936#define BIGMAC2_REGISTER_TX_STAT_GTBYT (0x39<<3)
4937#define BIGMAC2_REGISTER_TX_STAT_GTPOK (0x22<<3)
4938#define BIGMAC2_REGISTER_TX_STAT_GTPP (0x24<<3)
4374#define EMAC_LED_1000MB_OVERRIDE (1L<<1) 4939#define EMAC_LED_1000MB_OVERRIDE (1L<<1)
4375#define EMAC_LED_100MB_OVERRIDE (1L<<2) 4940#define EMAC_LED_100MB_OVERRIDE (1L<<2)
4376#define EMAC_LED_10MB_OVERRIDE (1L<<3) 4941#define EMAC_LED_10MB_OVERRIDE (1L<<3)
@@ -4478,6 +5043,8 @@
4478#define HW_LOCK_RESOURCE_SPIO 2 5043#define HW_LOCK_RESOURCE_SPIO 2
4479#define HW_LOCK_RESOURCE_UNDI 5 5044#define HW_LOCK_RESOURCE_UNDI 5
4480#define PRS_FLAG_OVERETH_IPV4 1 5045#define PRS_FLAG_OVERETH_IPV4 1
5046#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
5047#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
4481#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) 5048#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4482#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) 5049#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4483#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) 5050#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -4504,6 +5071,8 @@
4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) 5071#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
4505#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0) 5072#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0)
4506#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31) 5073#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31)
5074#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2)
5075#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3)
4507#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3) 5076#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3)
4508#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2) 5077#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2)
4509#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5) 5078#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5)
@@ -4796,6 +5365,253 @@
4796#define PCI_ID_VAL1 0x434 5365#define PCI_ID_VAL1 0x434
4797#define PCI_ID_VAL2 0x438 5366#define PCI_ID_VAL2 0x438
4798 5367
5368#define PXPCS_TL_CONTROL_5 0x814
5369#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
5370#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
5371#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
5372#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
5373#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
5374#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
5375#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
5376#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
5377#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
5378#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
5379#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
5380#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
5381#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
5382#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
5383#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
5384#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
5385#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
5386#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
5387#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
5388#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
5389#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
5390#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
5391#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
5392#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
5393#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
5394#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
5395#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
5396#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
5397#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
5398#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
5399
5400
5401#define PXPCS_TL_FUNC345_STAT 0x854
5402#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
5403#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
5404 (1 << 28) /* Unsupported Request Error Status in function4, if \
5405 set, generate pcie_err_attn output when this error is seen. WC */
5406#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
5407 (1 << 27) /* ECRC Error TLP Status Status in function 4, if set, \
5408 generate pcie_err_attn output when this error is seen.. WC */
5409#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
5410 (1 << 26) /* Malformed TLP Status Status in function 4, if set, \
5411 generate pcie_err_attn output when this error is seen.. WC */
5412#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
5413 (1 << 25) /* Receiver Overflow Status Status in function 4, if \
5414 set, generate pcie_err_attn output when this error is seen.. WC \
5415 */
5416#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
5417 (1 << 24) /* Unexpected Completion Status Status in function 4, \
5418 if set, generate pcie_err_attn output when this error is seen. WC \
5419 */
5420#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
5421 (1 << 23) /* Receive UR Statusin function 4. If set, generate \
5422 pcie_err_attn output when this error is seen. WC */
5423#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
5424 (1 << 22) /* Completer Timeout Status Status in function 4, if \
5425 set, generate pcie_err_attn output when this error is seen. WC */
5426#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
5427 (1 << 21) /* Flow Control Protocol Error Status Status in \
5428 function 4, if set, generate pcie_err_attn output when this error \
5429 is seen. WC */
5430#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
5431 (1 << 20) /* Poisoned Error Status Status in function 4, if set, \
5432 generate pcie_err_attn output when this error is seen.. WC */
5433#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
5434#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
5435 (1 << 18) /* Unsupported Request Error Status in function3, if \
5436 set, generate pcie_err_attn output when this error is seen. WC */
5437#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
5438 (1 << 17) /* ECRC Error TLP Status Status in function 3, if set, \
5439 generate pcie_err_attn output when this error is seen.. WC */
5440#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
5441 (1 << 16) /* Malformed TLP Status Status in function 3, if set, \
5442 generate pcie_err_attn output when this error is seen.. WC */
5443#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
5444 (1 << 15) /* Receiver Overflow Status Status in function 3, if \
5445 set, generate pcie_err_attn output when this error is seen.. WC \
5446 */
5447#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
5448 (1 << 14) /* Unexpected Completion Status Status in function 3, \
5449 if set, generate pcie_err_attn output when this error is seen. WC \
5450 */
5451#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
5452 (1 << 13) /* Receive UR Statusin function 3. If set, generate \
5453 pcie_err_attn output when this error is seen. WC */
5454#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
5455 (1 << 12) /* Completer Timeout Status Status in function 3, if \
5456 set, generate pcie_err_attn output when this error is seen. WC */
5457#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
5458 (1 << 11) /* Flow Control Protocol Error Status Status in \
5459 function 3, if set, generate pcie_err_attn output when this error \
5460 is seen. WC */
5461#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
5462 (1 << 10) /* Poisoned Error Status Status in function 3, if set, \
5463 generate pcie_err_attn output when this error is seen.. WC */
5464#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
5465#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
5466 (1 << 8) /* Unsupported Request Error Status for Function 2, if \
5467 set, generate pcie_err_attn output when this error is seen. WC */
5468#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
5469 (1 << 7) /* ECRC Error TLP Status Status for Function 2, if set, \
5470 generate pcie_err_attn output when this error is seen.. WC */
5471#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
5472 (1 << 6) /* Malformed TLP Status Status for Function 2, if set, \
5473 generate pcie_err_attn output when this error is seen.. WC */
5474#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
5475 (1 << 5) /* Receiver Overflow Status Status for Function 2, if \
5476 set, generate pcie_err_attn output when this error is seen.. WC \
5477 */
5478#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
5479 (1 << 4) /* Unexpected Completion Status Status for Function 2, \
5480 if set, generate pcie_err_attn output when this error is seen. WC \
5481 */
5482#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
5483 (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
5484 pcie_err_attn output when this error is seen. WC */
5485#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
5486 (1 << 2) /* Completer Timeout Status Status for Function 2, if \
5487 set, generate pcie_err_attn output when this error is seen. WC */
5488#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
5489 (1 << 1) /* Flow Control Protocol Error Status Status for \
5490 Function 2, if set, generate pcie_err_attn output when this error \
5491 is seen. WC */
5492#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
5493 (1 << 0) /* Poisoned Error Status Status for Function 2, if set, \
5494 generate pcie_err_attn output when this error is seen.. WC */
5495
5496
5497#define PXPCS_TL_FUNC678_STAT 0x85C
5498#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
5499#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
5500 (1 << 28) /* Unsupported Request Error Status in function7, if \
5501 set, generate pcie_err_attn output when this error is seen. WC */
5502#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
5503 (1 << 27) /* ECRC Error TLP Status Status in function 7, if set, \
5504 generate pcie_err_attn output when this error is seen.. WC */
5505#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
5506 (1 << 26) /* Malformed TLP Status Status in function 7, if set, \
5507 generate pcie_err_attn output when this error is seen.. WC */
5508#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
5509 (1 << 25) /* Receiver Overflow Status Status in function 7, if \
5510 set, generate pcie_err_attn output when this error is seen.. WC \
5511 */
5512#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
5513 (1 << 24) /* Unexpected Completion Status Status in function 7, \
5514 if set, generate pcie_err_attn output when this error is seen. WC \
5515 */
5516#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
5517 (1 << 23) /* Receive UR Statusin function 7. If set, generate \
5518 pcie_err_attn output when this error is seen. WC */
5519#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
5520 (1 << 22) /* Completer Timeout Status Status in function 7, if \
5521 set, generate pcie_err_attn output when this error is seen. WC */
5522#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
5523 (1 << 21) /* Flow Control Protocol Error Status Status in \
5524 function 7, if set, generate pcie_err_attn output when this error \
5525 is seen. WC */
5526#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
5527 (1 << 20) /* Poisoned Error Status Status in function 7, if set, \
5528 generate pcie_err_attn output when this error is seen.. WC */
5529#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
5530#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
5531 (1 << 18) /* Unsupported Request Error Status in function6, if \
5532 set, generate pcie_err_attn output when this error is seen. WC */
5533#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
5534 (1 << 17) /* ECRC Error TLP Status Status in function 6, if set, \
5535 generate pcie_err_attn output when this error is seen.. WC */
5536#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
5537 (1 << 16) /* Malformed TLP Status Status in function 6, if set, \
5538 generate pcie_err_attn output when this error is seen.. WC */
5539#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
5540 (1 << 15) /* Receiver Overflow Status Status in function 6, if \
5541 set, generate pcie_err_attn output when this error is seen.. WC \
5542 */
5543#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
5544 (1 << 14) /* Unexpected Completion Status Status in function 6, \
5545 if set, generate pcie_err_attn output when this error is seen. WC \
5546 */
5547#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
5548 (1 << 13) /* Receive UR Statusin function 6. If set, generate \
5549 pcie_err_attn output when this error is seen. WC */
5550#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
5551 (1 << 12) /* Completer Timeout Status Status in function 6, if \
5552 set, generate pcie_err_attn output when this error is seen. WC */
5553#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
5554 (1 << 11) /* Flow Control Protocol Error Status Status in \
5555 function 6, if set, generate pcie_err_attn output when this error \
5556 is seen. WC */
5557#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
5558 (1 << 10) /* Poisoned Error Status Status in function 6, if set, \
5559 generate pcie_err_attn output when this error is seen.. WC */
5560#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
5561#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
5562 (1 << 8) /* Unsupported Request Error Status for Function 5, if \
5563 set, generate pcie_err_attn output when this error is seen. WC */
5564#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
5565 (1 << 7) /* ECRC Error TLP Status Status for Function 5, if set, \
5566 generate pcie_err_attn output when this error is seen.. WC */
5567#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
5568 (1 << 6) /* Malformed TLP Status Status for Function 5, if set, \
5569 generate pcie_err_attn output when this error is seen.. WC */
5570#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
5571 (1 << 5) /* Receiver Overflow Status Status for Function 5, if \
5572 set, generate pcie_err_attn output when this error is seen.. WC \
5573 */
5574#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
5575 (1 << 4) /* Unexpected Completion Status Status for Function 5, \
5576 if set, generate pcie_err_attn output when this error is seen. WC \
5577 */
5578#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
5579 (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
5580 pcie_err_attn output when this error is seen. WC */
5581#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
5582 (1 << 2) /* Completer Timeout Status Status for Function 5, if \
5583 set, generate pcie_err_attn output when this error is seen. WC */
5584#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
5585 (1 << 1) /* Flow Control Protocol Error Status Status for \
5586 Function 5, if set, generate pcie_err_attn output when this error \
5587 is seen. WC */
5588#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
5589 (1 << 0) /* Poisoned Error Status Status for Function 5, if set, \
5590 generate pcie_err_attn output when this error is seen.. WC */
5591
5592
5593#define BAR_USTRORM_INTMEM 0x400000
5594#define BAR_CSTRORM_INTMEM 0x410000
5595#define BAR_XSTRORM_INTMEM 0x420000
5596#define BAR_TSTRORM_INTMEM 0x430000
5597
5598/* for accessing the IGU in case of status block ACK */
5599#define BAR_IGU_INTMEM 0x440000
5600
5601#define BAR_DOORBELL_OFFSET 0x800000
5602
5603#define BAR_ME_REGISTER 0x450000
5604#define ME_REG_PF_NUM_SHIFT 0
5605#define ME_REG_PF_NUM\
5606 (7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
5607#define ME_REG_VF_VALID (1<<8)
5608#define ME_REG_VF_NUM_SHIFT 9
5609#define ME_REG_VF_NUM_MASK (0x3f<<ME_REG_VF_NUM_SHIFT)
5610#define ME_REG_VF_ERR (0x1<<3)
5611#define ME_REG_ABS_PF_NUM_SHIFT 16
5612#define ME_REG_ABS_PF_NUM\
5613 (7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
5614
4799 5615
4800#define MDIO_REG_BANK_CL73_IEEEB0 0x0 5616#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4801#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 5617#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -5276,6 +6092,11 @@ Theotherbitsarereservedandshouldbezero*/
5276#define IGU_INT_NOP 2 6092#define IGU_INT_NOP 2
5277#define IGU_INT_NOP2 3 6093#define IGU_INT_NOP2 3
5278 6094
6095#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0
6096#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1
6097#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2
6098#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup 3
6099
5279#define COMMAND_REG_INT_ACK 0x0 6100#define COMMAND_REG_INT_ACK 0x0
5280#define COMMAND_REG_PROD_UPD 0x4 6101#define COMMAND_REG_PROD_UPD 0x4
5281#define COMMAND_REG_ATTN_BITS_UPD 0x8 6102#define COMMAND_REG_ATTN_BITS_UPD 0x8
@@ -5318,6 +6139,50 @@ Theotherbitsarereservedandshouldbezero*/
5318#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6 6139#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05a6
5319 6140
5320#define IGU_REG_RESERVED_UPPER 0x05ff 6141#define IGU_REG_RESERVED_UPPER 0x05ff
6142/* Fields of IGU PF CONFIGRATION REGISTER */
6143#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
6144#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
6145#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
6146#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
6147#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
6148#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
6149
6150/* Fields of IGU VF CONFIGRATION REGISTER */
6151#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
6152#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
6153#define IGU_VF_CONF_PARENT_MASK (0x3<<2) /* Parent PF */
6154#define IGU_VF_CONF_PARENT_SHIFT 2 /* Parent PF */
6155#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
6156
6157
6158#define IGU_BC_DSB_NUM_SEGS 5
6159#define IGU_BC_NDSB_NUM_SEGS 2
6160#define IGU_NORM_DSB_NUM_SEGS 2
6161#define IGU_NORM_NDSB_NUM_SEGS 1
6162#define IGU_BC_BASE_DSB_PROD 128
6163#define IGU_NORM_BASE_DSB_PROD 136
6164
6165#define IGU_CTRL_CMD_TYPE_WR\
6166 1
6167#define IGU_CTRL_CMD_TYPE_RD\
6168 0
6169
6170#define IGU_SEG_ACCESS_NORM 0
6171#define IGU_SEG_ACCESS_DEF 1
6172#define IGU_SEG_ACCESS_ATTN 2
6173
6174 /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
6175 [5:2] = 0; [1:0] = PF number) */
6176#define IGU_FID_ENCODE_IS_PF (0x1<<6)
6177#define IGU_FID_ENCODE_IS_PF_SHIFT 6
6178#define IGU_FID_VF_NUM_MASK (0x3f)
6179#define IGU_FID_PF_NUM_MASK (0x7)
6180
6181#define IGU_REG_MAPPING_MEMORY_VALID (1<<0)
6182#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK (0x3F<<1)
6183#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT 1
6184#define IGU_REG_MAPPING_MEMORY_FID_MASK (0x7F<<7)
6185#define IGU_REG_MAPPING_MEMORY_FID_SHIFT 7
5321 6186
5322 6187
5323#define CDU_REGION_NUMBER_XCM_AG 2 6188#define CDU_REGION_NUMBER_XCM_AG 2
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index efa1403ebf82..4733c835dad9 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -14,8 +14,8 @@
14 * Statistics and Link management by Yitchak Gertner 14 * Statistics and Link management by Yitchak Gertner
15 * 15 *
16 */ 16 */
17 #include "bnx2x_cmn.h" 17#include "bnx2x_cmn.h"
18 #include "bnx2x_stats.h" 18#include "bnx2x_stats.h"
19 19
20/* Statistics */ 20/* Statistics */
21 21
@@ -153,7 +153,7 @@ static inline long bnx2x_hilo(u32 *hiref)
153static void bnx2x_storm_stats_post(struct bnx2x *bp) 153static void bnx2x_storm_stats_post(struct bnx2x *bp)
154{ 154{
155 if (!bp->stats_pending) { 155 if (!bp->stats_pending) {
156 struct eth_query_ramrod_data ramrod_data = {0}; 156 struct common_query_ramrod_data ramrod_data = {0};
157 int i, rc; 157 int i, rc;
158 158
159 spin_lock_bh(&bp->stats_lock); 159 spin_lock_bh(&bp->stats_lock);
@@ -163,14 +163,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
163 for_each_queue(bp, i) 163 for_each_queue(bp, i)
164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); 164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
165 165
166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
167 ((u32 *)&ramrod_data)[1], 167 ((u32 *)&ramrod_data)[1],
168 ((u32 *)&ramrod_data)[0], 0); 168 ((u32 *)&ramrod_data)[0], 1);
169 if (rc == 0) { 169 if (rc == 0)
170 /* stats ramrod has it's own slot on the spq */
171 bp->spq_left++;
172 bp->stats_pending = 1; 170 bp->stats_pending = 1;
173 }
174 171
175 spin_unlock_bh(&bp->stats_lock); 172 spin_unlock_bh(&bp->stats_lock);
176 } 173 }
@@ -188,20 +185,12 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
188 /* loader */ 185 /* loader */
189 if (bp->executer_idx) { 186 if (bp->executer_idx) {
190 int loader_idx = PMF_DMAE_C(bp); 187 int loader_idx = PMF_DMAE_C(bp);
188 u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
189 true, DMAE_COMP_GRC);
190 opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
191 191
192 memset(dmae, 0, sizeof(struct dmae_command)); 192 memset(dmae, 0, sizeof(struct dmae_command));
193 193 dmae->opcode = opcode;
194 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
195 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
196 DMAE_CMD_DST_RESET |
197#ifdef __BIG_ENDIAN
198 DMAE_CMD_ENDIANITY_B_DW_SWAP |
199#else
200 DMAE_CMD_ENDIANITY_DW_SWAP |
201#endif
202 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
203 DMAE_CMD_PORT_0) |
204 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
205 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); 194 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
206 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); 195 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
207 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + 196 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
@@ -253,26 +242,17 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
253 u32 *stats_comp = bnx2x_sp(bp, stats_comp); 242 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
254 243
255 /* sanity */ 244 /* sanity */
256 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) { 245 if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
257 BNX2X_ERR("BUG!\n"); 246 BNX2X_ERR("BUG!\n");
258 return; 247 return;
259 } 248 }
260 249
261 bp->executer_idx = 0; 250 bp->executer_idx = 0;
262 251
263 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 252 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
264 DMAE_CMD_C_ENABLE |
265 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
266#ifdef __BIG_ENDIAN
267 DMAE_CMD_ENDIANITY_B_DW_SWAP |
268#else
269 DMAE_CMD_ENDIANITY_DW_SWAP |
270#endif
271 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
272 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
273 253
274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 254 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
275 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); 255 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
276 dmae->src_addr_lo = bp->port.port_stx >> 2; 256 dmae->src_addr_lo = bp->port.port_stx >> 2;
277 dmae->src_addr_hi = 0; 257 dmae->src_addr_hi = 0;
278 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 258 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
@@ -283,7 +263,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
283 dmae->comp_val = 1; 263 dmae->comp_val = 1;
284 264
285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
286 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 266 dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
287 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX; 267 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
288 dmae->src_addr_hi = 0; 268 dmae->src_addr_hi = 0;
289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) + 269 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
@@ -304,7 +284,6 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
304{ 284{
305 struct dmae_command *dmae; 285 struct dmae_command *dmae;
306 int port = BP_PORT(bp); 286 int port = BP_PORT(bp);
307 int vn = BP_E1HVN(bp);
308 u32 opcode; 287 u32 opcode;
309 int loader_idx = PMF_DMAE_C(bp); 288 int loader_idx = PMF_DMAE_C(bp);
310 u32 mac_addr; 289 u32 mac_addr;
@@ -319,16 +298,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
319 bp->executer_idx = 0; 298 bp->executer_idx = 0;
320 299
321 /* MCP */ 300 /* MCP */
322 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 301 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
323 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | 302 true, DMAE_COMP_GRC);
324 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
325#ifdef __BIG_ENDIAN
326 DMAE_CMD_ENDIANITY_B_DW_SWAP |
327#else
328 DMAE_CMD_ENDIANITY_DW_SWAP |
329#endif
330 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
331 (vn << DMAE_CMD_E1HVN_SHIFT));
332 303
333 if (bp->port.port_stx) { 304 if (bp->port.port_stx) {
334 305
@@ -359,16 +330,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
359 } 330 }
360 331
361 /* MAC */ 332 /* MAC */
362 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 333 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
363 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE | 334 true, DMAE_COMP_GRC);
364 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
365#ifdef __BIG_ENDIAN
366 DMAE_CMD_ENDIANITY_B_DW_SWAP |
367#else
368 DMAE_CMD_ENDIANITY_DW_SWAP |
369#endif
370 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
371 (vn << DMAE_CMD_E1HVN_SHIFT));
372 335
373 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { 336 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
374 337
@@ -379,13 +342,21 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
379 BIGMAC_REGISTER_TX_STAT_GTBYT */ 342 BIGMAC_REGISTER_TX_STAT_GTBYT */
380 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 343 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
381 dmae->opcode = opcode; 344 dmae->opcode = opcode;
382 dmae->src_addr_lo = (mac_addr + 345 if (CHIP_IS_E1x(bp)) {
346 dmae->src_addr_lo = (mac_addr +
383 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; 347 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
348 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
349 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
350 } else {
351 dmae->src_addr_lo = (mac_addr +
352 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
353 dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
354 BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
355 }
356
384 dmae->src_addr_hi = 0; 357 dmae->src_addr_hi = 0;
385 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); 358 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
386 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); 359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
387 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
388 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
389 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 360 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
390 dmae->comp_addr_hi = 0; 361 dmae->comp_addr_hi = 0;
391 dmae->comp_val = 1; 362 dmae->comp_val = 1;
@@ -394,15 +365,31 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
394 BIGMAC_REGISTER_RX_STAT_GRIPJ */ 365 BIGMAC_REGISTER_RX_STAT_GRIPJ */
395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 366 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
396 dmae->opcode = opcode; 367 dmae->opcode = opcode;
397 dmae->src_addr_lo = (mac_addr +
398 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
399 dmae->src_addr_hi = 0; 368 dmae->src_addr_hi = 0;
400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + 369 if (CHIP_IS_E1x(bp)) {
401 offsetof(struct bmac_stats, rx_stat_gr64_lo)); 370 dmae->src_addr_lo = (mac_addr +
402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + 371 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
403 offsetof(struct bmac_stats, rx_stat_gr64_lo)); 372 dmae->dst_addr_lo =
404 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - 373 U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
405 BIGMAC_REGISTER_RX_STAT_GR64) >> 2; 374 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
375 dmae->dst_addr_hi =
376 U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
377 offsetof(struct bmac1_stats, rx_stat_gr64_lo));
378 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
379 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
380 } else {
381 dmae->src_addr_lo =
382 (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
383 dmae->dst_addr_lo =
384 U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
385 offsetof(struct bmac2_stats, rx_stat_gr64_lo));
386 dmae->dst_addr_hi =
387 U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
388 offsetof(struct bmac2_stats, rx_stat_gr64_lo));
389 dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
390 BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
391 }
392
406 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; 393 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
407 dmae->comp_addr_hi = 0; 394 dmae->comp_addr_hi = 0;
408 dmae->comp_val = 1; 395 dmae->comp_val = 1;
@@ -483,16 +470,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
483 dmae->comp_val = 1; 470 dmae->comp_val = 1;
484 471
485 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 472 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
486 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 473 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
487 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 474 true, DMAE_COMP_PCI);
488 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
489#ifdef __BIG_ENDIAN
490 DMAE_CMD_ENDIANITY_B_DW_SWAP |
491#else
492 DMAE_CMD_ENDIANITY_DW_SWAP |
493#endif
494 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
495 (vn << DMAE_CMD_E1HVN_SHIFT));
496 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : 475 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
497 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; 476 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
498 dmae->src_addr_hi = 0; 477 dmae->src_addr_hi = 0;
@@ -522,16 +501,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
522 bp->executer_idx = 0; 501 bp->executer_idx = 0;
523 memset(dmae, 0, sizeof(struct dmae_command)); 502 memset(dmae, 0, sizeof(struct dmae_command));
524 503
525 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 504 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
526 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 505 true, DMAE_COMP_PCI);
527 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
528#ifdef __BIG_ENDIAN
529 DMAE_CMD_ENDIANITY_B_DW_SWAP |
530#else
531 DMAE_CMD_ENDIANITY_DW_SWAP |
532#endif
533 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
534 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
535 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 506 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
536 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 507 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
537 dmae->dst_addr_lo = bp->func_stx >> 2; 508 dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -571,7 +542,6 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
571 542
572static void bnx2x_bmac_stats_update(struct bnx2x *bp) 543static void bnx2x_bmac_stats_update(struct bnx2x *bp)
573{ 544{
574 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); 545 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
576 struct bnx2x_eth_stats *estats = &bp->eth_stats; 546 struct bnx2x_eth_stats *estats = &bp->eth_stats;
577 struct { 547 struct {
@@ -579,35 +549,74 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
579 u32 hi; 549 u32 hi;
580 } diff; 550 } diff;
581 551
582 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); 552 if (CHIP_IS_E1x(bp)) {
583 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); 553 struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
584 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); 554
585 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 555 /* the macros below will use "bmac1_stats" type */
586 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 556 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
587 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 557 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
588 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); 558 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
589 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 559 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
590 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); 560 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
591 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 561 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
592 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); 562 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
593 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); 563 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
594 UPDATE_STAT64(tx_stat_gt127, 564 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
565 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
566 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
567 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
568 UPDATE_STAT64(tx_stat_gt127,
569 tx_stat_etherstatspkts65octetsto127octets);
570 UPDATE_STAT64(tx_stat_gt255,
571 tx_stat_etherstatspkts128octetsto255octets);
572 UPDATE_STAT64(tx_stat_gt511,
573 tx_stat_etherstatspkts256octetsto511octets);
574 UPDATE_STAT64(tx_stat_gt1023,
575 tx_stat_etherstatspkts512octetsto1023octets);
576 UPDATE_STAT64(tx_stat_gt1518,
577 tx_stat_etherstatspkts1024octetsto1522octets);
578 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
579 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
580 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
581 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
582 UPDATE_STAT64(tx_stat_gterr,
583 tx_stat_dot3statsinternalmactransmiterrors);
584 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
585
586 } else {
587 struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
588
589 /* the macros below will use "bmac2_stats" type */
590 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
591 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
592 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
593 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
594 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
595 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
596 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
597 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
598 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
599 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
600 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
601 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
602 UPDATE_STAT64(tx_stat_gt127,
595 tx_stat_etherstatspkts65octetsto127octets); 603 tx_stat_etherstatspkts65octetsto127octets);
596 UPDATE_STAT64(tx_stat_gt255, 604 UPDATE_STAT64(tx_stat_gt255,
597 tx_stat_etherstatspkts128octetsto255octets); 605 tx_stat_etherstatspkts128octetsto255octets);
598 UPDATE_STAT64(tx_stat_gt511, 606 UPDATE_STAT64(tx_stat_gt511,
599 tx_stat_etherstatspkts256octetsto511octets); 607 tx_stat_etherstatspkts256octetsto511octets);
600 UPDATE_STAT64(tx_stat_gt1023, 608 UPDATE_STAT64(tx_stat_gt1023,
601 tx_stat_etherstatspkts512octetsto1023octets); 609 tx_stat_etherstatspkts512octetsto1023octets);
602 UPDATE_STAT64(tx_stat_gt1518, 610 UPDATE_STAT64(tx_stat_gt1518,
603 tx_stat_etherstatspkts1024octetsto1522octets); 611 tx_stat_etherstatspkts1024octetsto1522octets);
604 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); 612 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
605 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); 613 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
606 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); 614 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
607 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); 615 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
608 UPDATE_STAT64(tx_stat_gterr, 616 UPDATE_STAT64(tx_stat_gterr,
609 tx_stat_dot3statsinternalmactransmiterrors); 617 tx_stat_dot3statsinternalmactransmiterrors);
610 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); 618 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
619 }
611 620
612 estats->pause_frames_received_hi = 621 estats->pause_frames_received_hi =
613 pstats->mac_stx[1].rx_stat_bmac_xpf_hi; 622 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
@@ -1124,24 +1133,17 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1124 1133
1125 bp->executer_idx = 0; 1134 bp->executer_idx = 0;
1126 1135
1127 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 1136 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
1128 DMAE_CMD_C_ENABLE |
1129 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1130#ifdef __BIG_ENDIAN
1131 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1132#else
1133 DMAE_CMD_ENDIANITY_DW_SWAP |
1134#endif
1135 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1136 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1137 1137
1138 if (bp->port.port_stx) { 1138 if (bp->port.port_stx) {
1139 1139
1140 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1140 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1141 if (bp->func_stx) 1141 if (bp->func_stx)
1142 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC); 1142 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1143 opcode, DMAE_COMP_GRC);
1143 else 1144 else
1144 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 1145 dmae->opcode = bnx2x_dmae_opcode_add_comp(
1146 opcode, DMAE_COMP_PCI);
1145 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 1147 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1146 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 1148 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1147 dmae->dst_addr_lo = bp->port.port_stx >> 2; 1149 dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1165,7 +1167,8 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
1165 if (bp->func_stx) { 1167 if (bp->func_stx) {
1166 1168
1167 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1169 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1168 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI); 1170 dmae->opcode =
1171 bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
1169 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); 1172 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
1170 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); 1173 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
1171 dmae->dst_addr_lo = bp->func_stx >> 2; 1174 dmae->dst_addr_lo = bp->func_stx >> 2;
@@ -1258,16 +1261,8 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1258 bp->executer_idx = 0; 1261 bp->executer_idx = 0;
1259 1262
1260 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); 1263 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
1261 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC | 1264 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
1262 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 1265 true, DMAE_COMP_PCI);
1263 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1264#ifdef __BIG_ENDIAN
1265 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1266#else
1267 DMAE_CMD_ENDIANITY_DW_SWAP |
1268#endif
1269 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1270 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1271 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); 1266 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
1272 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); 1267 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
1273 dmae->dst_addr_lo = bp->port.port_stx >> 2; 1268 dmae->dst_addr_lo = bp->port.port_stx >> 2;
@@ -1284,9 +1279,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1284 1279
1285static void bnx2x_func_stats_base_init(struct bnx2x *bp) 1280static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1286{ 1281{
1287 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX; 1282 int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
1288 int port = BP_PORT(bp);
1289 int func;
1290 u32 func_stx; 1283 u32 func_stx;
1291 1284
1292 /* sanity */ 1285 /* sanity */
@@ -1299,9 +1292,9 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp)
1299 func_stx = bp->func_stx; 1292 func_stx = bp->func_stx;
1300 1293
1301 for (vn = VN_0; vn < vn_max; vn++) { 1294 for (vn = VN_0; vn < vn_max; vn++) {
1302 func = 2*vn + port; 1295 int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn;
1303 1296
1304 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); 1297 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1305 bnx2x_func_stats_init(bp); 1298 bnx2x_func_stats_init(bp);
1306 bnx2x_hw_stats_post(bp); 1299 bnx2x_hw_stats_post(bp);
1307 bnx2x_stats_comp(bp); 1300 bnx2x_stats_comp(bp);
@@ -1325,16 +1318,8 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1325 bp->executer_idx = 0; 1318 bp->executer_idx = 0;
1326 memset(dmae, 0, sizeof(struct dmae_command)); 1319 memset(dmae, 0, sizeof(struct dmae_command));
1327 1320
1328 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI | 1321 dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
1329 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE | 1322 true, DMAE_COMP_PCI);
1330 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
1331#ifdef __BIG_ENDIAN
1332 DMAE_CMD_ENDIANITY_B_DW_SWAP |
1333#else
1334 DMAE_CMD_ENDIANITY_DW_SWAP |
1335#endif
1336 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
1337 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
1338 dmae->src_addr_lo = bp->func_stx >> 2; 1323 dmae->src_addr_lo = bp->func_stx >> 2;
1339 dmae->src_addr_hi = 0; 1324 dmae->src_addr_hi = 0;
1340 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base)); 1325 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
@@ -1352,8 +1337,9 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
1352void bnx2x_stats_init(struct bnx2x *bp) 1337void bnx2x_stats_init(struct bnx2x *bp)
1353{ 1338{
1354 int port = BP_PORT(bp); 1339 int port = BP_PORT(bp);
1355 int func = BP_FUNC(bp); 1340 int mb_idx = BP_FW_MB_IDX(bp);
1356 int i; 1341 int i;
1342 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
1357 1343
1358 bp->stats_pending = 0; 1344 bp->stats_pending = 0;
1359 bp->executer_idx = 0; 1345 bp->executer_idx = 0;
@@ -1362,7 +1348,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
1362 /* port and func stats for management */ 1348 /* port and func stats for management */
1363 if (!BP_NOMCP(bp)) { 1349 if (!BP_NOMCP(bp)) {
1364 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); 1350 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
1365 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param); 1351 bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
1366 1352
1367 } else { 1353 } else {
1368 bp->port.port_stx = 0; 1354 bp->port.port_stx = 0;
@@ -1395,6 +1381,18 @@ void bnx2x_stats_init(struct bnx2x *bp)
1395 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); 1381 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
1396 } 1382 }
1397 1383
1384 for_each_queue(bp, i) {
1385 /* Set initial stats counter in the stats ramrod data to -1 */
1386 int cl_id = bp->fp[i].cl_id;
1387
1388 stats->xstorm_common.client_statistics[cl_id].
1389 stats_counter = 0xffff;
1390 stats->ustorm_common.client_statistics[cl_id].
1391 stats_counter = 0xffff;
1392 stats->tstorm_common.client_statistics[cl_id].
1393 stats_counter = 0xffff;
1394 }
1395
1398 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats)); 1396 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
1399 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats)); 1397 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
1400 1398
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 38a4e908f4fb..afd15efa429a 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -9,6 +9,10 @@
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir 10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver 11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
12 */ 16 */
13 17
14#ifndef BNX2X_STATS_H 18#ifndef BNX2X_STATS_H
@@ -228,12 +232,8 @@ struct bnx2x_eth_stats {
228/* Forward declaration */ 232/* Forward declaration */
229struct bnx2x; 233struct bnx2x;
230 234
231
232void bnx2x_stats_init(struct bnx2x *bp); 235void bnx2x_stats_init(struct bnx2x *bp);
233 236
234extern const u32 dmae_reg_go_c[]; 237extern const u32 dmae_reg_go_c[];
235extern int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
236 u32 data_hi, u32 data_lo, int common);
237
238 238
239#endif /* BNX2X_STATS_H */ 239#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 822f586d72af..881914bc4e9c 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -93,7 +93,7 @@
93// compare MAC addresses 93// compare MAC addresses
94#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN) 94#define MAC_ADDRESS_COMPARE(A, B) memcmp(A, B, ETH_ALEN)
95 95
96static struct mac_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}}; 96static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
97static u16 ad_ticks_per_sec; 97static u16 ad_ticks_per_sec;
98static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; 98static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
99 99
@@ -129,9 +129,8 @@ static void ad_marker_response_received(struct bond_marker *marker, struct port
129 */ 129 */
130static inline struct bonding *__get_bond_by_port(struct port *port) 130static inline struct bonding *__get_bond_by_port(struct port *port)
131{ 131{
132 if (port->slave == NULL) { 132 if (port->slave == NULL)
133 return NULL; 133 return NULL;
134 }
135 134
136 return bond_get_bond_by_slave(port->slave); 135 return bond_get_bond_by_slave(port->slave);
137} 136}
@@ -144,9 +143,8 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
144 */ 143 */
145static inline struct port *__get_first_port(struct bonding *bond) 144static inline struct port *__get_first_port(struct bonding *bond)
146{ 145{
147 if (bond->slave_cnt == 0) { 146 if (bond->slave_cnt == 0)
148 return NULL; 147 return NULL;
149 }
150 148
151 return &(SLAVE_AD_INFO(bond->first_slave).port); 149 return &(SLAVE_AD_INFO(bond->first_slave).port);
152} 150}
@@ -164,9 +162,8 @@ static inline struct port *__get_next_port(struct port *port)
164 struct slave *slave = port->slave; 162 struct slave *slave = port->slave;
165 163
166 // If there's no bond for this port, or this is the last slave 164 // If there's no bond for this port, or this is the last slave
167 if ((bond == NULL) || (slave->next == bond->first_slave)) { 165 if ((bond == NULL) || (slave->next == bond->first_slave))
168 return NULL; 166 return NULL;
169 }
170 167
171 return &(SLAVE_AD_INFO(slave->next).port); 168 return &(SLAVE_AD_INFO(slave->next).port);
172} 169}
@@ -183,9 +180,8 @@ static inline struct aggregator *__get_first_agg(struct port *port)
183 struct bonding *bond = __get_bond_by_port(port); 180 struct bonding *bond = __get_bond_by_port(port);
184 181
185 // If there's no bond for this port, or bond has no slaves 182 // If there's no bond for this port, or bond has no slaves
186 if ((bond == NULL) || (bond->slave_cnt == 0)) { 183 if ((bond == NULL) || (bond->slave_cnt == 0))
187 return NULL; 184 return NULL;
188 }
189 185
190 return &(SLAVE_AD_INFO(bond->first_slave).aggregator); 186 return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
191} 187}
@@ -203,9 +199,8 @@ static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
203 struct bonding *bond = bond_get_bond_by_slave(slave); 199 struct bonding *bond = bond_get_bond_by_slave(slave);
204 200
205 // If there's no bond for this aggregator, or this is the last slave 201 // If there's no bond for this aggregator, or this is the last slave
206 if ((bond == NULL) || (slave->next == bond->first_slave)) { 202 if ((bond == NULL) || (slave->next == bond->first_slave))
207 return NULL; 203 return NULL;
208 }
209 204
210 return &(SLAVE_AD_INFO(slave->next).aggregator); 205 return &(SLAVE_AD_INFO(slave->next).aggregator);
211} 206}
@@ -240,9 +235,8 @@ static inline void __enable_port(struct port *port)
240{ 235{
241 struct slave *slave = port->slave; 236 struct slave *slave = port->slave;
242 237
243 if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) { 238 if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
244 bond_set_slave_active_flags(slave); 239 bond_set_slave_active_flags(slave);
245 }
246} 240}
247 241
248/** 242/**
@@ -252,7 +246,7 @@ static inline void __enable_port(struct port *port)
252 */ 246 */
253static inline int __port_is_enabled(struct port *port) 247static inline int __port_is_enabled(struct port *port)
254{ 248{
255 return(port->slave->state == BOND_STATE_ACTIVE); 249 return port->slave->state == BOND_STATE_ACTIVE;
256} 250}
257 251
258/** 252/**
@@ -265,9 +259,8 @@ static inline u32 __get_agg_selection_mode(struct port *port)
265{ 259{
266 struct bonding *bond = __get_bond_by_port(port); 260 struct bonding *bond = __get_bond_by_port(port);
267 261
268 if (bond == NULL) { 262 if (bond == NULL)
269 return BOND_AD_STABLE; 263 return BOND_AD_STABLE;
270 }
271 264
272 return BOND_AD_INFO(bond).agg_select_mode; 265 return BOND_AD_INFO(bond).agg_select_mode;
273} 266}
@@ -281,9 +274,8 @@ static inline int __check_agg_selection_timer(struct port *port)
281{ 274{
282 struct bonding *bond = __get_bond_by_port(port); 275 struct bonding *bond = __get_bond_by_port(port);
283 276
284 if (bond == NULL) { 277 if (bond == NULL)
285 return 0; 278 return 0;
286 }
287 279
288 return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; 280 return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
289} 281}
@@ -328,9 +320,9 @@ static u16 __get_link_speed(struct port *port)
328 * link down, it sets the speed to 0. 320 * link down, it sets the speed to 0.
329 * This is done in spite of the fact that the e100 driver reports 0 to be 321 * This is done in spite of the fact that the e100 driver reports 0 to be
330 * compatible with MVT in the future.*/ 322 * compatible with MVT in the future.*/
331 if (slave->link != BOND_LINK_UP) { 323 if (slave->link != BOND_LINK_UP)
332 speed=0; 324 speed = 0;
333 } else { 325 else {
334 switch (slave->speed) { 326 switch (slave->speed) {
335 case SPEED_10: 327 case SPEED_10:
336 speed = AD_LINK_SPEED_BITMASK_10MBPS; 328 speed = AD_LINK_SPEED_BITMASK_10MBPS;
@@ -375,18 +367,18 @@ static u8 __get_duplex(struct port *port)
375 367
376 // handling a special case: when the configuration starts with 368 // handling a special case: when the configuration starts with
377 // link down, it sets the duplex to 0. 369 // link down, it sets the duplex to 0.
378 if (slave->link != BOND_LINK_UP) { 370 if (slave->link != BOND_LINK_UP)
379 retval=0x0; 371 retval = 0x0;
380 } else { 372 else {
381 switch (slave->duplex) { 373 switch (slave->duplex) {
382 case DUPLEX_FULL: 374 case DUPLEX_FULL:
383 retval=0x1; 375 retval = 0x1;
384 pr_debug("Port %d Received status full duplex update from adapter\n", 376 pr_debug("Port %d Received status full duplex update from adapter\n",
385 port->actor_port_number); 377 port->actor_port_number);
386 break; 378 break;
387 case DUPLEX_HALF: 379 case DUPLEX_HALF:
388 default: 380 default:
389 retval=0x0; 381 retval = 0x0;
390 pr_debug("Port %d Received status NOT full duplex update from adapter\n", 382 pr_debug("Port %d Received status NOT full duplex update from adapter\n",
391 port->actor_port_number); 383 port->actor_port_number);
392 break; 384 break;
@@ -419,15 +411,14 @@ static inline void __initialize_port_locks(struct port *port)
419 */ 411 */
420static u16 __ad_timer_to_ticks(u16 timer_type, u16 par) 412static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
421{ 413{
422 u16 retval=0; //to silence the compiler 414 u16 retval = 0; /* to silence the compiler */
423 415
424 switch (timer_type) { 416 switch (timer_type) {
425 case AD_CURRENT_WHILE_TIMER: // for rx machine usage 417 case AD_CURRENT_WHILE_TIMER: // for rx machine usage
426 if (par) { // for short or long timeout 418 if (par)
427 retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout 419 retval = (AD_SHORT_TIMEOUT_TIME*ad_ticks_per_sec); // short timeout
428 } else { 420 else
429 retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout 421 retval = (AD_LONG_TIMEOUT_TIME*ad_ticks_per_sec); // long timeout
430 }
431 break; 422 break;
432 case AD_ACTOR_CHURN_TIMER: // for local churn machine 423 case AD_ACTOR_CHURN_TIMER: // for local churn machine
433 retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec); 424 retval = (AD_CHURN_DETECTION_TIME*ad_ticks_per_sec);
@@ -519,11 +510,11 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
519 port->actor_oper_port_state &= ~AD_STATE_DEFAULTED; 510 port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
520 511
521 // set the partner sync. to on if the partner is sync. and the port is matched 512 // set the partner sync. to on if the partner is sync. and the port is matched
522 if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) { 513 if ((port->sm_vars & AD_PORT_MATCHED)
514 && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION))
523 partner->port_state |= AD_STATE_SYNCHRONIZATION; 515 partner->port_state |= AD_STATE_SYNCHRONIZATION;
524 } else { 516 else
525 partner->port_state &= ~AD_STATE_SYNCHRONIZATION; 517 partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
526 }
527 } 518 }
528} 519}
529 520
@@ -653,7 +644,7 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
653 */ 644 */
654static void __attach_bond_to_agg(struct port *port) 645static void __attach_bond_to_agg(struct port *port)
655{ 646{
656 port=NULL; // just to satisfy the compiler 647 port = NULL; /* just to satisfy the compiler */
657 // This function does nothing since the parser/multiplexer of the receive 648 // This function does nothing since the parser/multiplexer of the receive
658 // and the parser/multiplexer of the aggregator are already combined 649 // and the parser/multiplexer of the aggregator are already combined
659} 650}
@@ -668,7 +659,7 @@ static void __attach_bond_to_agg(struct port *port)
668 */ 659 */
669static void __detach_bond_from_agg(struct port *port) 660static void __detach_bond_from_agg(struct port *port)
670{ 661{
671 port=NULL; // just to satisfy the compiler 662 port = NULL; /* just to satisfy the compiler */
672 // This function does nothing sience the parser/multiplexer of the receive 663 // This function does nothing sience the parser/multiplexer of the receive
673 // and the parser/multiplexer of the aggregator are already combined 664 // and the parser/multiplexer of the aggregator are already combined
674} 665}
@@ -685,7 +676,9 @@ static int __agg_ports_are_ready(struct aggregator *aggregator)
685 676
686 if (aggregator) { 677 if (aggregator) {
687 // scan all ports in this aggregator to verfy if they are all ready 678 // scan all ports in this aggregator to verfy if they are all ready
688 for (port=aggregator->lag_ports; port; port=port->next_port_in_aggregator) { 679 for (port = aggregator->lag_ports;
680 port;
681 port = port->next_port_in_aggregator) {
689 if (!(port->sm_vars & AD_PORT_READY_N)) { 682 if (!(port->sm_vars & AD_PORT_READY_N)) {
690 retval = 0; 683 retval = 0;
691 break; 684 break;
@@ -706,12 +699,12 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
706{ 699{
707 struct port *port; 700 struct port *port;
708 701
709 for (port=aggregator->lag_ports; port; port=port->next_port_in_aggregator) { 702 for (port = aggregator->lag_ports; port;
710 if (val) { 703 port = port->next_port_in_aggregator) {
704 if (val)
711 port->sm_vars |= AD_PORT_READY; 705 port->sm_vars |= AD_PORT_READY;
712 } else { 706 else
713 port->sm_vars &= ~AD_PORT_READY; 707 port->sm_vars &= ~AD_PORT_READY;
714 }
715 } 708 }
716} 709}
717 710
@@ -722,7 +715,7 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
722 */ 715 */
723static u32 __get_agg_bandwidth(struct aggregator *aggregator) 716static u32 __get_agg_bandwidth(struct aggregator *aggregator)
724{ 717{
725 u32 bandwidth=0; 718 u32 bandwidth = 0;
726 u32 basic_speed; 719 u32 basic_speed;
727 720
728 if (aggregator->num_of_ports) { 721 if (aggregator->num_of_ports) {
@@ -744,7 +737,7 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
744 bandwidth = aggregator->num_of_ports * 10000; 737 bandwidth = aggregator->num_of_ports * 10000;
745 break; 738 break;
746 default: 739 default:
747 bandwidth=0; // to silent the compilor .... 740 bandwidth = 0; /*to silence the compiler ....*/
748 } 741 }
749 } 742 }
750 return bandwidth; 743 return bandwidth;
@@ -835,9 +828,8 @@ static int ad_lacpdu_send(struct port *port)
835 int length = sizeof(struct lacpdu_header); 828 int length = sizeof(struct lacpdu_header);
836 829
837 skb = dev_alloc_skb(length); 830 skb = dev_alloc_skb(length);
838 if (!skb) { 831 if (!skb)
839 return -ENOMEM; 832 return -ENOMEM;
840 }
841 833
842 skb->dev = slave->dev; 834 skb->dev = slave->dev;
843 skb_reset_mac_header(skb); 835 skb_reset_mac_header(skb);
@@ -876,9 +868,8 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
876 int length = sizeof(struct bond_marker_header); 868 int length = sizeof(struct bond_marker_header);
877 869
878 skb = dev_alloc_skb(length + 16); 870 skb = dev_alloc_skb(length + 16);
879 if (!skb) { 871 if (!skb)
880 return -ENOMEM; 872 return -ENOMEM;
881 }
882 873
883 skb_reserve(skb, 16); 874 skb_reserve(skb, 16);
884 875
@@ -919,9 +910,10 @@ static void ad_mux_machine(struct port *port)
919 } else { 910 } else {
920 switch (port->sm_mux_state) { 911 switch (port->sm_mux_state) {
921 case AD_MUX_DETACHED: 912 case AD_MUX_DETACHED:
922 if ((port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if SELECTED or STANDBY 913 if ((port->sm_vars & AD_PORT_SELECTED)
914 || (port->sm_vars & AD_PORT_STANDBY))
915 /* if SELECTED or STANDBY */
923 port->sm_mux_state = AD_MUX_WAITING; // next state 916 port->sm_mux_state = AD_MUX_WAITING; // next state
924 }
925 break; 917 break;
926 case AD_MUX_WAITING: 918 case AD_MUX_WAITING:
927 // if SELECTED == FALSE return to DETACH state 919 // if SELECTED == FALSE return to DETACH state
@@ -935,18 +927,18 @@ static void ad_mux_machine(struct port *port)
935 } 927 }
936 928
937 // check if the wait_while_timer expired 929 // check if the wait_while_timer expired
938 if (port->sm_mux_timer_counter && !(--port->sm_mux_timer_counter)) { 930 if (port->sm_mux_timer_counter
931 && !(--port->sm_mux_timer_counter))
939 port->sm_vars |= AD_PORT_READY_N; 932 port->sm_vars |= AD_PORT_READY_N;
940 }
941 933
942 // in order to withhold the selection logic to check all ports READY_N value 934 // in order to withhold the selection logic to check all ports READY_N value
943 // every callback cycle to update ready variable, we check READY_N and update READY here 935 // every callback cycle to update ready variable, we check READY_N and update READY here
944 __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); 936 __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
945 937
946 // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state 938 // if the wait_while_timer expired, and the port is in READY state, move to ATTACHED state
947 if ((port->sm_vars & AD_PORT_READY) && !port->sm_mux_timer_counter) { 939 if ((port->sm_vars & AD_PORT_READY)
940 && !port->sm_mux_timer_counter)
948 port->sm_mux_state = AD_MUX_ATTACHED; // next state 941 port->sm_mux_state = AD_MUX_ATTACHED; // next state
949 }
950 break; 942 break;
951 case AD_MUX_ATTACHED: 943 case AD_MUX_ATTACHED:
952 // check also if agg_select_timer expired(so the edable port will take place only after this timer) 944 // check also if agg_select_timer expired(so the edable port will take place only after this timer)
@@ -1041,13 +1033,14 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1041 1033
1042 // check if state machine should change state 1034 // check if state machine should change state
1043 // first, check if port was reinitialized 1035 // first, check if port was reinitialized
1044 if (port->sm_vars & AD_PORT_BEGIN) { 1036 if (port->sm_vars & AD_PORT_BEGIN)
1045 port->sm_rx_state = AD_RX_INITIALIZE; // next state 1037 /* next state */
1046 } 1038 port->sm_rx_state = AD_RX_INITIALIZE;
1047 // check if port is not enabled 1039 // check if port is not enabled
1048 else if (!(port->sm_vars & AD_PORT_BEGIN) && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED)) { 1040 else if (!(port->sm_vars & AD_PORT_BEGIN)
1049 port->sm_rx_state = AD_RX_PORT_DISABLED; // next state 1041 && !port->is_enabled && !(port->sm_vars & AD_PORT_MOVED))
1050 } 1042 /* next state */
1043 port->sm_rx_state = AD_RX_PORT_DISABLED;
1051 // check if new lacpdu arrived 1044 // check if new lacpdu arrived
1052 else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) { 1045 else if (lacpdu && ((port->sm_rx_state == AD_RX_EXPIRED) || (port->sm_rx_state == AD_RX_DEFAULTED) || (port->sm_rx_state == AD_RX_CURRENT))) {
1053 port->sm_rx_timer_counter = 0; // zero timer 1046 port->sm_rx_timer_counter = 0; // zero timer
@@ -1069,13 +1062,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1069 // if no lacpdu arrived and no timer is on 1062 // if no lacpdu arrived and no timer is on
1070 switch (port->sm_rx_state) { 1063 switch (port->sm_rx_state) {
1071 case AD_RX_PORT_DISABLED: 1064 case AD_RX_PORT_DISABLED:
1072 if (port->sm_vars & AD_PORT_MOVED) { 1065 if (port->sm_vars & AD_PORT_MOVED)
1073 port->sm_rx_state = AD_RX_INITIALIZE; // next state 1066 port->sm_rx_state = AD_RX_INITIALIZE; // next state
1074 } else if (port->is_enabled && (port->sm_vars & AD_PORT_LACP_ENABLED)) { 1067 else if (port->is_enabled
1068 && (port->sm_vars
1069 & AD_PORT_LACP_ENABLED))
1075 port->sm_rx_state = AD_RX_EXPIRED; // next state 1070 port->sm_rx_state = AD_RX_EXPIRED; // next state
1076 } else if (port->is_enabled && ((port->sm_vars & AD_PORT_LACP_ENABLED) == 0)) { 1071 else if (port->is_enabled
1072 && ((port->sm_vars
1073 & AD_PORT_LACP_ENABLED) == 0))
1077 port->sm_rx_state = AD_RX_LACP_DISABLED; // next state 1074 port->sm_rx_state = AD_RX_LACP_DISABLED; // next state
1078 }
1079 break; 1075 break;
1080 default: //to silence the compiler 1076 default: //to silence the compiler
1081 break; 1077 break;
@@ -1091,11 +1087,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1091 port->sm_rx_state); 1087 port->sm_rx_state);
1092 switch (port->sm_rx_state) { 1088 switch (port->sm_rx_state) {
1093 case AD_RX_INITIALIZE: 1089 case AD_RX_INITIALIZE:
1094 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) { 1090 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
1095 port->sm_vars &= ~AD_PORT_LACP_ENABLED; 1091 port->sm_vars &= ~AD_PORT_LACP_ENABLED;
1096 } else { 1092 else
1097 port->sm_vars |= AD_PORT_LACP_ENABLED; 1093 port->sm_vars |= AD_PORT_LACP_ENABLED;
1098 }
1099 port->sm_vars &= ~AD_PORT_SELECTED; 1094 port->sm_vars &= ~AD_PORT_SELECTED;
1100 __record_default(port); 1095 __record_default(port);
1101 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1096 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
@@ -1149,9 +1144,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1149 // verify that if the aggregator is enabled, the port is enabled too. 1144 // verify that if the aggregator is enabled, the port is enabled too.
1150 //(because if the link goes down for a short time, the 802.3ad will not 1145 //(because if the link goes down for a short time, the 802.3ad will not
1151 // catch it, and the port will continue to be disabled) 1146 // catch it, and the port will continue to be disabled)
1152 if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { 1147 if (port->aggregator
1148 && port->aggregator->is_active
1149 && !__port_is_enabled(port))
1153 __enable_port(port); 1150 __enable_port(port);
1154 }
1155 break; 1151 break;
1156 default: //to silence the compiler 1152 default: //to silence the compiler
1157 break; 1153 break;
@@ -1183,7 +1179,8 @@ static void ad_tx_machine(struct port *port)
1183 } 1179 }
1184 } 1180 }
1185 // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND 1181 // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND
1186 port->sm_tx_timer_counter=ad_ticks_per_sec/AD_MAX_TX_IN_SECOND; 1182 port->sm_tx_timer_counter =
1183 ad_ticks_per_sec/AD_MAX_TX_IN_SECOND;
1187 } 1184 }
1188} 1185}
1189 1186
@@ -1216,9 +1213,9 @@ static void ad_periodic_machine(struct port *port)
1216 // If not expired, check if there is some new timeout parameter from the partner state 1213 // If not expired, check if there is some new timeout parameter from the partner state
1217 switch (port->sm_periodic_state) { 1214 switch (port->sm_periodic_state) {
1218 case AD_FAST_PERIODIC: 1215 case AD_FAST_PERIODIC:
1219 if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { 1216 if (!(port->partner_oper.port_state
1217 & AD_STATE_LACP_TIMEOUT))
1220 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state 1218 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
1221 }
1222 break; 1219 break;
1223 case AD_SLOW_PERIODIC: 1220 case AD_SLOW_PERIODIC:
1224 if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { 1221 if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
@@ -1237,11 +1234,11 @@ static void ad_periodic_machine(struct port *port)
1237 port->sm_periodic_state = AD_FAST_PERIODIC; // next state 1234 port->sm_periodic_state = AD_FAST_PERIODIC; // next state
1238 break; 1235 break;
1239 case AD_PERIODIC_TX: 1236 case AD_PERIODIC_TX:
1240 if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) { 1237 if (!(port->partner_oper.port_state
1238 & AD_STATE_LACP_TIMEOUT))
1241 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state 1239 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
1242 } else { 1240 else
1243 port->sm_periodic_state = AD_FAST_PERIODIC; // next state 1241 port->sm_periodic_state = AD_FAST_PERIODIC; // next state
1244 }
1245 break; 1242 break;
1246 default: //to silence the compiler 1243 default: //to silence the compiler
1247 break; 1244 break;
@@ -1287,35 +1284,37 @@ static void ad_port_selection_logic(struct port *port)
1287 int found = 0; 1284 int found = 0;
1288 1285
1289 // if the port is already Selected, do nothing 1286 // if the port is already Selected, do nothing
1290 if (port->sm_vars & AD_PORT_SELECTED) { 1287 if (port->sm_vars & AD_PORT_SELECTED)
1291 return; 1288 return;
1292 }
1293 1289
1294 // if the port is connected to other aggregator, detach it 1290 // if the port is connected to other aggregator, detach it
1295 if (port->aggregator) { 1291 if (port->aggregator) {
1296 // detach the port from its former aggregator 1292 // detach the port from its former aggregator
1297 temp_aggregator=port->aggregator; 1293 temp_aggregator = port->aggregator;
1298 for (curr_port=temp_aggregator->lag_ports; curr_port; last_port=curr_port, curr_port=curr_port->next_port_in_aggregator) { 1294 for (curr_port = temp_aggregator->lag_ports; curr_port;
1295 last_port = curr_port,
1296 curr_port = curr_port->next_port_in_aggregator) {
1299 if (curr_port == port) { 1297 if (curr_port == port) {
1300 temp_aggregator->num_of_ports--; 1298 temp_aggregator->num_of_ports--;
1301 if (!last_port) {// if it is the first port attached to the aggregator 1299 if (!last_port) {// if it is the first port attached to the aggregator
1302 temp_aggregator->lag_ports=port->next_port_in_aggregator; 1300 temp_aggregator->lag_ports =
1301 port->next_port_in_aggregator;
1303 } else {// not the first port attached to the aggregator 1302 } else {// not the first port attached to the aggregator
1304 last_port->next_port_in_aggregator=port->next_port_in_aggregator; 1303 last_port->next_port_in_aggregator =
1304 port->next_port_in_aggregator;
1305 } 1305 }
1306 1306
1307 // clear the port's relations to this aggregator 1307 // clear the port's relations to this aggregator
1308 port->aggregator = NULL; 1308 port->aggregator = NULL;
1309 port->next_port_in_aggregator=NULL; 1309 port->next_port_in_aggregator = NULL;
1310 port->actor_port_aggregator_identifier=0; 1310 port->actor_port_aggregator_identifier = 0;
1311 1311
1312 pr_debug("Port %d left LAG %d\n", 1312 pr_debug("Port %d left LAG %d\n",
1313 port->actor_port_number, 1313 port->actor_port_number,
1314 temp_aggregator->aggregator_identifier); 1314 temp_aggregator->aggregator_identifier);
1315 // if the aggregator is empty, clear its parameters, and set it ready to be attached 1315 // if the aggregator is empty, clear its parameters, and set it ready to be attached
1316 if (!temp_aggregator->lag_ports) { 1316 if (!temp_aggregator->lag_ports)
1317 ad_clear_agg(temp_aggregator); 1317 ad_clear_agg(temp_aggregator);
1318 }
1319 break; 1318 break;
1320 } 1319 }
1321 } 1320 }
@@ -1333,9 +1332,8 @@ static void ad_port_selection_logic(struct port *port)
1333 1332
1334 // keep a free aggregator for later use(if needed) 1333 // keep a free aggregator for later use(if needed)
1335 if (!aggregator->lag_ports) { 1334 if (!aggregator->lag_ports) {
1336 if (!free_aggregator) { 1335 if (!free_aggregator)
1337 free_aggregator=aggregator; 1336 free_aggregator = aggregator;
1338 }
1339 continue; 1337 continue;
1340 } 1338 }
1341 // check if current aggregator suits us 1339 // check if current aggregator suits us
@@ -1350,10 +1348,11 @@ static void ad_port_selection_logic(struct port *port)
1350 ) { 1348 ) {
1351 // attach to the founded aggregator 1349 // attach to the founded aggregator
1352 port->aggregator = aggregator; 1350 port->aggregator = aggregator;
1353 port->actor_port_aggregator_identifier=port->aggregator->aggregator_identifier; 1351 port->actor_port_aggregator_identifier =
1354 port->next_port_in_aggregator=aggregator->lag_ports; 1352 port->aggregator->aggregator_identifier;
1353 port->next_port_in_aggregator = aggregator->lag_ports;
1355 port->aggregator->num_of_ports++; 1354 port->aggregator->num_of_ports++;
1356 aggregator->lag_ports=port; 1355 aggregator->lag_ports = port;
1357 pr_debug("Port %d joined LAG %d(existing LAG)\n", 1356 pr_debug("Port %d joined LAG %d(existing LAG)\n",
1358 port->actor_port_number, 1357 port->actor_port_number,
1359 port->aggregator->aggregator_identifier); 1358 port->aggregator->aggregator_identifier);
@@ -1370,20 +1369,23 @@ static void ad_port_selection_logic(struct port *port)
1370 if (free_aggregator) { 1369 if (free_aggregator) {
1371 // assign port a new aggregator 1370 // assign port a new aggregator
1372 port->aggregator = free_aggregator; 1371 port->aggregator = free_aggregator;
1373 port->actor_port_aggregator_identifier=port->aggregator->aggregator_identifier; 1372 port->actor_port_aggregator_identifier =
1373 port->aggregator->aggregator_identifier;
1374 1374
1375 // update the new aggregator's parameters 1375 // update the new aggregator's parameters
1376 // if port was responsed from the end-user 1376 // if port was responsed from the end-user
1377 if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) {// if port is full duplex 1377 if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)
1378 /* if port is full duplex */
1378 port->aggregator->is_individual = false; 1379 port->aggregator->is_individual = false;
1379 } else { 1380 else
1380 port->aggregator->is_individual = true; 1381 port->aggregator->is_individual = true;
1381 }
1382 1382
1383 port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key; 1383 port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key;
1384 port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key; 1384 port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key;
1385 port->aggregator->partner_system=port->partner_oper.system; 1385 port->aggregator->partner_system =
1386 port->aggregator->partner_system_priority = port->partner_oper.system_priority; 1386 port->partner_oper.system;
1387 port->aggregator->partner_system_priority =
1388 port->partner_oper.system_priority;
1387 port->aggregator->partner_oper_aggregator_key = port->partner_oper.key; 1389 port->aggregator->partner_oper_aggregator_key = port->partner_oper.key;
1388 port->aggregator->receive_state = 1; 1390 port->aggregator->receive_state = 1;
1389 port->aggregator->transmit_state = 1; 1391 port->aggregator->transmit_state = 1;
@@ -1704,9 +1706,8 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
1704 port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; 1706 port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
1705 port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; 1707 port->actor_oper_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
1706 1708
1707 if (lacp_fast) { 1709 if (lacp_fast)
1708 port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; 1710 port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
1709 }
1710 1711
1711 memcpy(&port->partner_admin, &tmpl, sizeof(tmpl)); 1712 memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
1712 memcpy(&port->partner_oper, &tmpl, sizeof(tmpl)); 1713 memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
@@ -1785,13 +1786,16 @@ static void ad_marker_info_send(struct port *port)
1785 marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8)); 1786 marker.requester_port = (((port->actor_port_number & 0xFF) << 8) |((u16)(port->actor_port_number & 0xFF00) >> 8));
1786 marker.requester_system = port->actor_system; 1787 marker.requester_system = port->actor_system;
1787 // convert requester_port(u32) to Big Endian 1788 // convert requester_port(u32) to Big Endian
1788 marker.requester_transaction_id = (((++port->transaction_id & 0xFF) << 24) |((port->transaction_id & 0xFF00) << 8) |((port->transaction_id & 0xFF0000) >> 8) |((port->transaction_id & 0xFF000000) >> 24)); 1789 marker.requester_transaction_id =
1790 (((++port->transaction_id & 0xFF) << 24)
1791 | ((port->transaction_id & 0xFF00) << 8)
1792 | ((port->transaction_id & 0xFF0000) >> 8)
1793 | ((port->transaction_id & 0xFF000000) >> 24));
1789 marker.pad = 0; 1794 marker.pad = 0;
1790 marker.tlv_type_terminator = 0x00; 1795 marker.tlv_type_terminator = 0x00;
1791 marker.terminator_length = 0x00; 1796 marker.terminator_length = 0x00;
1792 for (index=0; index<90; index++) { 1797 for (index = 0; index < 90; index++)
1793 marker.reserved_90[index]=0; 1798 marker.reserved_90[index] = 0;
1794 }
1795 1799
1796 // send the marker information 1800 // send the marker information
1797 if (ad_marker_send(port, &marker) >= 0) { 1801 if (ad_marker_send(port, &marker) >= 0) {
@@ -1816,7 +1820,7 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
1816 //marker = *marker_info; 1820 //marker = *marker_info;
1817 memcpy(&marker, marker_info, sizeof(struct bond_marker)); 1821 memcpy(&marker, marker_info, sizeof(struct bond_marker));
1818 // change the marker subtype to marker response 1822 // change the marker subtype to marker response
1819 marker.tlv_type=AD_MARKER_RESPONSE_SUBTYPE; 1823 marker.tlv_type = AD_MARKER_RESPONSE_SUBTYPE;
1820 // send the marker response 1824 // send the marker response
1821 1825
1822 if (ad_marker_send(port, &marker) >= 0) { 1826 if (ad_marker_send(port, &marker) >= 0) {
@@ -1837,8 +1841,8 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
1837static void ad_marker_response_received(struct bond_marker *marker, 1841static void ad_marker_response_received(struct bond_marker *marker,
1838 struct port *port) 1842 struct port *port)
1839{ 1843{
1840 marker=NULL; // just to satisfy the compiler 1844 marker = NULL; /* just to satisfy the compiler */
1841 port=NULL; // just to satisfy the compiler 1845 port = NULL; /* just to satisfy the compiler */
1842 // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW 1846 // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW
1843} 1847}
1844 1848
@@ -1932,9 +1936,8 @@ int bond_3ad_bind_slave(struct slave *slave)
1932 port->actor_admin_port_key |= (__get_link_speed(port) << 1); 1936 port->actor_admin_port_key |= (__get_link_speed(port) << 1);
1933 port->actor_oper_port_key = port->actor_admin_port_key; 1937 port->actor_oper_port_key = port->actor_admin_port_key;
1934 // if the port is not full duplex, then the port should be not lacp Enabled 1938 // if the port is not full duplex, then the port should be not lacp Enabled
1935 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) { 1939 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS))
1936 port->sm_vars &= ~AD_PORT_LACP_ENABLED; 1940 port->sm_vars &= ~AD_PORT_LACP_ENABLED;
1937 }
1938 // actor system is the bond's system 1941 // actor system is the bond's system
1939 port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; 1942 port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
1940 // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second) 1943 // tx timer(to verify that no more than MAX_TX_IN_SECOND lacpdu's are sent in one second)
@@ -2006,9 +2009,10 @@ void bond_3ad_unbind_slave(struct slave *slave)
2006 new_aggregator = __get_first_agg(port); 2009 new_aggregator = __get_first_agg(port);
2007 for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) { 2010 for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
2008 // if the new aggregator is empty, or it is connected to our port only 2011 // if the new aggregator is empty, or it is connected to our port only
2009 if (!new_aggregator->lag_ports || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator)) { 2012 if (!new_aggregator->lag_ports
2013 || ((new_aggregator->lag_ports == port)
2014 && !new_aggregator->lag_ports->next_port_in_aggregator))
2010 break; 2015 break;
2011 }
2012 } 2016 }
2013 // if new aggregator found, copy the aggregator's parameters 2017 // if new aggregator found, copy the aggregator's parameters
2014 // and connect the related lag_ports to the new aggregator 2018 // and connect the related lag_ports to the new aggregator
@@ -2037,17 +2041,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
2037 new_aggregator->num_of_ports = aggregator->num_of_ports; 2041 new_aggregator->num_of_ports = aggregator->num_of_ports;
2038 2042
2039 // update the information that is written on the ports about the aggregator 2043 // update the information that is written on the ports about the aggregator
2040 for (temp_port=aggregator->lag_ports; temp_port; temp_port=temp_port->next_port_in_aggregator) { 2044 for (temp_port = aggregator->lag_ports; temp_port;
2041 temp_port->aggregator=new_aggregator; 2045 temp_port = temp_port->next_port_in_aggregator) {
2046 temp_port->aggregator = new_aggregator;
2042 temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier; 2047 temp_port->actor_port_aggregator_identifier = new_aggregator->aggregator_identifier;
2043 } 2048 }
2044 2049
2045 // clear the aggregator 2050 // clear the aggregator
2046 ad_clear_agg(aggregator); 2051 ad_clear_agg(aggregator);
2047 2052
2048 if (select_new_active_agg) { 2053 if (select_new_active_agg)
2049 ad_agg_selection_logic(__get_first_agg(port)); 2054 ad_agg_selection_logic(__get_first_agg(port));
2050 }
2051 } else { 2055 } else {
2052 pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n", 2056 pr_warning("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
2053 slave->dev->master->name); 2057 slave->dev->master->name);
@@ -2071,15 +2075,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
2071 for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) { 2075 for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
2072 prev_port = NULL; 2076 prev_port = NULL;
2073 // search the port in the aggregator's related ports 2077 // search the port in the aggregator's related ports
2074 for (temp_port=temp_aggregator->lag_ports; temp_port; prev_port=temp_port, temp_port=temp_port->next_port_in_aggregator) { 2078 for (temp_port = temp_aggregator->lag_ports; temp_port;
2079 prev_port = temp_port,
2080 temp_port = temp_port->next_port_in_aggregator) {
2075 if (temp_port == port) { // the aggregator found - detach the port from this aggregator 2081 if (temp_port == port) { // the aggregator found - detach the port from this aggregator
2076 if (prev_port) { 2082 if (prev_port)
2077 prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator; 2083 prev_port->next_port_in_aggregator = temp_port->next_port_in_aggregator;
2078 } else { 2084 else
2079 temp_aggregator->lag_ports = temp_port->next_port_in_aggregator; 2085 temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
2080 }
2081 temp_aggregator->num_of_ports--; 2086 temp_aggregator->num_of_ports--;
2082 if (temp_aggregator->num_of_ports==0) { 2087 if (temp_aggregator->num_of_ports == 0) {
2083 select_new_active_agg = temp_aggregator->is_active; 2088 select_new_active_agg = temp_aggregator->is_active;
2084 // clear the aggregator 2089 // clear the aggregator
2085 ad_clear_agg(temp_aggregator); 2090 ad_clear_agg(temp_aggregator);
@@ -2094,7 +2099,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2094 } 2099 }
2095 } 2100 }
2096 } 2101 }
2097 port->slave=NULL; 2102 port->slave = NULL;
2098} 2103}
2099 2104
2100/** 2105/**
@@ -2119,14 +2124,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2119 2124
2120 read_lock(&bond->lock); 2125 read_lock(&bond->lock);
2121 2126
2122 if (bond->kill_timers) { 2127 if (bond->kill_timers)
2123 goto out; 2128 goto out;
2124 }
2125 2129
2126 //check if there are any slaves 2130 //check if there are any slaves
2127 if (bond->slave_cnt == 0) { 2131 if (bond->slave_cnt == 0)
2128 goto re_arm; 2132 goto re_arm;
2129 }
2130 2133
2131 // check if agg_select_timer timer after initialize is timed out 2134 // check if agg_select_timer timer after initialize is timed out
2132 if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) { 2135 if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
@@ -2159,9 +2162,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2159 ad_tx_machine(port); 2162 ad_tx_machine(port);
2160 2163
2161 // turn off the BEGIN bit, since we already handled it 2164 // turn off the BEGIN bit, since we already handled it
2162 if (port->sm_vars & AD_PORT_BEGIN) { 2165 if (port->sm_vars & AD_PORT_BEGIN)
2163 port->sm_vars &= ~AD_PORT_BEGIN; 2166 port->sm_vars &= ~AD_PORT_BEGIN;
2164 }
2165 } 2167 }
2166 2168
2167re_arm: 2169re_arm:
@@ -2245,7 +2247,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
2245 } 2247 }
2246 2248
2247 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2249 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2248 port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1); 2250 port->actor_oper_port_key = port->actor_admin_port_key |=
2251 (__get_link_speed(port) << 1);
2249 pr_debug("Port %d changed speed\n", port->actor_port_number); 2252 pr_debug("Port %d changed speed\n", port->actor_port_number);
2250 // there is no need to reselect a new aggregator, just signal the 2253 // there is no need to reselect a new aggregator, just signal the
2251 // state machines to reinitialize 2254 // state machines to reinitialize
@@ -2262,7 +2265,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2262{ 2265{
2263 struct port *port; 2266 struct port *port;
2264 2267
2265 port=&(SLAVE_AD_INFO(slave).port); 2268 port = &(SLAVE_AD_INFO(slave).port);
2266 2269
2267 // if slave is null, the whole port is not initialized 2270 // if slave is null, the whole port is not initialized
2268 if (!port->slave) { 2271 if (!port->slave) {
@@ -2272,7 +2275,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2272 } 2275 }
2273 2276
2274 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2277 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2275 port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port); 2278 port->actor_oper_port_key = port->actor_admin_port_key |=
2279 __get_duplex(port);
2276 pr_debug("Port %d changed duplex\n", port->actor_port_number); 2280 pr_debug("Port %d changed duplex\n", port->actor_port_number);
2277 // there is no need to reselect a new aggregator, just signal the 2281 // there is no need to reselect a new aggregator, just signal the
2278 // state machines to reinitialize 2282 // state machines to reinitialize
@@ -2304,14 +2308,17 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2304 if (link == BOND_LINK_UP) { 2308 if (link == BOND_LINK_UP) {
2305 port->is_enabled = true; 2309 port->is_enabled = true;
2306 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2310 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2307 port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port); 2311 port->actor_oper_port_key = port->actor_admin_port_key |=
2312 __get_duplex(port);
2308 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2313 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2309 port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1); 2314 port->actor_oper_port_key = port->actor_admin_port_key |=
2315 (__get_link_speed(port) << 1);
2310 } else { 2316 } else {
2311 /* link has failed */ 2317 /* link has failed */
2312 port->is_enabled = false; 2318 port->is_enabled = false;
2313 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2319 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2314 port->actor_oper_port_key= (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS); 2320 port->actor_oper_port_key = (port->actor_admin_port_key &=
2321 ~AD_SPEED_KEY_BITS);
2315 } 2322 }
2316 //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); 2323 //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
2317 // there is no need to reselect a new aggregator, just signal the 2324 // there is no need to reselect a new aggregator, just signal the
@@ -2394,9 +2401,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2394 */ 2401 */
2395 read_lock(&bond->lock); 2402 read_lock(&bond->lock);
2396 2403
2397 if (!BOND_IS_OK(bond)) { 2404 if (!BOND_IS_OK(bond))
2398 goto out; 2405 goto out;
2399 }
2400 2406
2401 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 2407 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
2402 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", 2408 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
@@ -2420,9 +2426,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2420 2426
2421 if (agg && (agg->aggregator_identifier == agg_id)) { 2427 if (agg && (agg->aggregator_identifier == agg_id)) {
2422 slave_agg_no--; 2428 slave_agg_no--;
2423 if (slave_agg_no < 0) { 2429 if (slave_agg_no < 0)
2424 break; 2430 break;
2425 }
2426 } 2431 }
2427 } 2432 }
2428 2433
@@ -2438,9 +2443,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2438 int slave_agg_id = 0; 2443 int slave_agg_id = 0;
2439 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; 2444 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
2440 2445
2441 if (agg) { 2446 if (agg)
2442 slave_agg_id = agg->aggregator_identifier; 2447 slave_agg_id = agg->aggregator_identifier;
2443 }
2444 2448
2445 if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) { 2449 if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
2446 res = bond_dev_queue_xmit(bond, skb, slave->dev); 2450 res = bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -2466,6 +2470,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2466 if (!(dev->flags & IFF_MASTER)) 2470 if (!(dev->flags & IFF_MASTER))
2467 goto out; 2471 goto out;
2468 2472
2473 if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
2474 goto out;
2475
2469 read_lock(&bond->lock); 2476 read_lock(&bond->lock);
2470 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2477 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
2471 orig_dev); 2478 orig_dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c746b331771d..26bb118c4533 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
362 goto out; 362 goto out;
363 } 363 }
364 364
365 if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
366 goto out;
367
365 if (skb->len < sizeof(struct arp_pkt)) { 368 if (skb->len < sizeof(struct arp_pkt)) {
366 pr_debug("Packet is too small to be an ARP\n"); 369 pr_debug("Packet is too small to be an ARP\n");
367 goto out; 370 goto out;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3b16f62d5606..beb3b7cecd52 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
76#include <linux/if_vlan.h> 76#include <linux/if_vlan.h>
77#include <linux/if_bonding.h> 77#include <linux/if_bonding.h>
78#include <linux/jiffies.h> 78#include <linux/jiffies.h>
79#include <linux/preempt.h>
79#include <net/route.h> 80#include <net/route.h>
80#include <net/net_namespace.h> 81#include <net/net_namespace.h>
81#include <net/netns/generic.h> 82#include <net/netns/generic.h>
@@ -109,6 +110,7 @@ static char *arp_validate;
109static char *fail_over_mac; 110static char *fail_over_mac;
110static int all_slaves_active = 0; 111static int all_slaves_active = 0;
111static struct bond_params bonding_defaults; 112static struct bond_params bonding_defaults;
113static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
112 114
113module_param(max_bonds, int, 0); 115module_param(max_bonds, int, 0);
114MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 116MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -163,9 +165,15 @@ module_param(all_slaves_active, int, 0);
163MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" 165MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
164 "by setting active flag for all slaves. " 166 "by setting active flag for all slaves. "
165 "0 for never (default), 1 for always."); 167 "0 for never (default), 1 for always.");
168module_param(resend_igmp, int, 0);
169MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
166 170
167/*----------------------------- Global variables ----------------------------*/ 171/*----------------------------- Global variables ----------------------------*/
168 172
173#ifdef CONFIG_NET_POLL_CONTROLLER
174cpumask_var_t netpoll_block_tx;
175#endif
176
169static const char * const version = 177static const char * const version =
170 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 178 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
171 179
@@ -176,9 +184,6 @@ static int arp_ip_count;
176static int bond_mode = BOND_MODE_ROUNDROBIN; 184static int bond_mode = BOND_MODE_ROUNDROBIN;
177static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2; 185static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
178static int lacp_fast; 186static int lacp_fast;
179#ifdef CONFIG_NET_POLL_CONTROLLER
180static int disable_netpoll = 1;
181#endif
182 187
183const struct bond_parm_tbl bond_lacp_tbl[] = { 188const struct bond_parm_tbl bond_lacp_tbl[] = {
184{ "slow", AD_LACP_SLOW}, 189{ "slow", AD_LACP_SLOW},
@@ -307,6 +312,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
307 312
308 pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); 313 pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
309 314
315 block_netpoll_tx();
310 write_lock_bh(&bond->lock); 316 write_lock_bh(&bond->lock);
311 317
312 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 318 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
@@ -341,6 +347,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
341 347
342out: 348out:
343 write_unlock_bh(&bond->lock); 349 write_unlock_bh(&bond->lock);
350 unblock_netpoll_tx();
344 return res; 351 return res;
345} 352}
346 353
@@ -446,11 +453,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
446 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { 453 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
447 struct netpoll *np = bond->dev->npinfo->netpoll; 454 struct netpoll *np = bond->dev->npinfo->netpoll;
448 slave_dev->npinfo = bond->dev->npinfo; 455 slave_dev->npinfo = bond->dev->npinfo;
449 np->real_dev = np->dev = skb->dev;
450 slave_dev->priv_flags |= IFF_IN_NETPOLL; 456 slave_dev->priv_flags |= IFF_IN_NETPOLL;
451 netpoll_send_skb(np, skb); 457 netpoll_send_skb_on_dev(np, skb, slave_dev);
452 slave_dev->priv_flags &= ~IFF_IN_NETPOLL; 458 slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
453 np->dev = bond->dev;
454 } else 459 } else
455#endif 460#endif
456 dev_queue_xmit(skb); 461 dev_queue_xmit(skb);
@@ -865,18 +870,13 @@ static void bond_mc_del(struct bonding *bond, void *addr)
865} 870}
866 871
867 872
868/* 873static void __bond_resend_igmp_join_requests(struct net_device *dev)
869 * Retrieve the list of registered multicast addresses for the bonding
870 * device and retransmit an IGMP JOIN request to the current active
871 * slave.
872 */
873static void bond_resend_igmp_join_requests(struct bonding *bond)
874{ 874{
875 struct in_device *in_dev; 875 struct in_device *in_dev;
876 struct ip_mc_list *im; 876 struct ip_mc_list *im;
877 877
878 rcu_read_lock(); 878 rcu_read_lock();
879 in_dev = __in_dev_get_rcu(bond->dev); 879 in_dev = __in_dev_get_rcu(dev);
880 if (in_dev) { 880 if (in_dev) {
881 for (im = in_dev->mc_list; im; im = im->next) 881 for (im = in_dev->mc_list; im; im = im->next)
882 ip_mc_rejoin_group(im); 882 ip_mc_rejoin_group(im);
@@ -886,6 +886,44 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
886} 886}
887 887
888/* 888/*
889 * Retrieve the list of registered multicast addresses for the bonding
890 * device and retransmit an IGMP JOIN request to the current active
891 * slave.
892 */
893static void bond_resend_igmp_join_requests(struct bonding *bond)
894{
895 struct net_device *vlan_dev;
896 struct vlan_entry *vlan;
897
898 read_lock(&bond->lock);
899
900 /* rejoin all groups on bond device */
901 __bond_resend_igmp_join_requests(bond->dev);
902
903 /* rejoin all groups on vlan devices */
904 if (bond->vlgrp) {
905 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
906 vlan_dev = vlan_group_get_device(bond->vlgrp,
907 vlan->vlan_id);
908 if (vlan_dev)
909 __bond_resend_igmp_join_requests(vlan_dev);
910 }
911 }
912
913 if (--bond->igmp_retrans > 0)
914 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
915
916 read_unlock(&bond->lock);
917}
918
919static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
920{
921 struct bonding *bond = container_of(work, struct bonding,
922 mcast_work.work);
923 bond_resend_igmp_join_requests(bond);
924}
925
926/*
889 * flush all members of flush->mc_list from device dev->mc_list 927 * flush all members of flush->mc_list from device dev->mc_list
890 */ 928 */
891static void bond_mc_list_flush(struct net_device *bond_dev, 929static void bond_mc_list_flush(struct net_device *bond_dev,
@@ -944,7 +982,6 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
944 982
945 netdev_for_each_mc_addr(ha, bond->dev) 983 netdev_for_each_mc_addr(ha, bond->dev)
946 dev_mc_add(new_active->dev, ha->addr); 984 dev_mc_add(new_active->dev, ha->addr);
947 bond_resend_igmp_join_requests(bond);
948 } 985 }
949} 986}
950 987
@@ -1180,9 +1217,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1180 } 1217 }
1181 } 1218 }
1182 1219
1183 /* resend IGMP joins since all were sent on curr_active_slave */ 1220 /* resend IGMP joins since active slave has changed or
1184 if (bond->params.mode == BOND_MODE_ROUNDROBIN) { 1221 * all were sent on curr_active_slave */
1185 bond_resend_igmp_join_requests(bond); 1222 if ((USES_PRIMARY(bond->params.mode) && new_active) ||
1223 bond->params.mode == BOND_MODE_ROUNDROBIN) {
1224 bond->igmp_retrans = bond->params.resend_igmp;
1225 queue_delayed_work(bond->wq, &bond->mcast_work, 0);
1186 } 1226 }
1187} 1227}
1188 1228
@@ -1294,9 +1334,14 @@ static bool slaves_support_netpoll(struct net_device *bond_dev)
1294 1334
1295static void bond_poll_controller(struct net_device *bond_dev) 1335static void bond_poll_controller(struct net_device *bond_dev)
1296{ 1336{
1297 struct net_device *dev = bond_dev->npinfo->netpoll->real_dev; 1337 struct bonding *bond = netdev_priv(bond_dev);
1298 if (dev != bond_dev) 1338 struct slave *slave;
1299 netpoll_poll_dev(dev); 1339 int i;
1340
1341 bond_for_each_slave(bond, slave, i) {
1342 if (slave->dev && IS_UP(slave->dev))
1343 netpoll_poll_dev(slave->dev);
1344 }
1300} 1345}
1301 1346
1302static void bond_netpoll_cleanup(struct net_device *bond_dev) 1347static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -1763,23 +1808,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1763 bond_set_carrier(bond); 1808 bond_set_carrier(bond);
1764 1809
1765#ifdef CONFIG_NET_POLL_CONTROLLER 1810#ifdef CONFIG_NET_POLL_CONTROLLER
1766 /* 1811 if (slaves_support_netpoll(bond_dev)) {
1767 * Netpoll and bonding is broken, make sure it is not initialized 1812 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1768 * until it is fixed. 1813 if (bond_dev->npinfo)
1769 */ 1814 slave_dev->npinfo = bond_dev->npinfo;
1770 if (disable_netpoll) { 1815 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
1771 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1816 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1772 } else { 1817 pr_info("New slave device %s does not support netpoll\n",
1773 if (slaves_support_netpoll(bond_dev)) { 1818 slave_dev->name);
1774 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; 1819 pr_info("Disabling netpoll support for %s\n", bond_dev->name);
1775 if (bond_dev->npinfo)
1776 slave_dev->npinfo = bond_dev->npinfo;
1777 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
1778 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1779 pr_info("New slave device %s does not support netpoll\n",
1780 slave_dev->name);
1781 pr_info("Disabling netpoll support for %s\n", bond_dev->name);
1782 }
1783 } 1820 }
1784#endif 1821#endif
1785 read_unlock(&bond->lock); 1822 read_unlock(&bond->lock);
@@ -1851,6 +1888,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1851 return -EINVAL; 1888 return -EINVAL;
1852 } 1889 }
1853 1890
1891 block_netpoll_tx();
1854 netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE); 1892 netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
1855 write_lock_bh(&bond->lock); 1893 write_lock_bh(&bond->lock);
1856 1894
@@ -1860,6 +1898,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1860 pr_info("%s: %s not enslaved\n", 1898 pr_info("%s: %s not enslaved\n",
1861 bond_dev->name, slave_dev->name); 1899 bond_dev->name, slave_dev->name);
1862 write_unlock_bh(&bond->lock); 1900 write_unlock_bh(&bond->lock);
1901 unblock_netpoll_tx();
1863 return -EINVAL; 1902 return -EINVAL;
1864 } 1903 }
1865 1904
@@ -1953,6 +1992,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1953 } 1992 }
1954 1993
1955 write_unlock_bh(&bond->lock); 1994 write_unlock_bh(&bond->lock);
1995 unblock_netpoll_tx();
1956 1996
1957 /* must do this from outside any spinlocks */ 1997 /* must do this from outside any spinlocks */
1958 bond_destroy_slave_symlinks(bond_dev, slave_dev); 1998 bond_destroy_slave_symlinks(bond_dev, slave_dev);
@@ -1983,10 +2023,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1983#ifdef CONFIG_NET_POLL_CONTROLLER 2023#ifdef CONFIG_NET_POLL_CONTROLLER
1984 read_lock_bh(&bond->lock); 2024 read_lock_bh(&bond->lock);
1985 2025
1986 /* Make sure netpoll over stays disabled until fixed. */ 2026 if (slaves_support_netpoll(bond_dev))
1987 if (!disable_netpoll) 2027 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1988 if (slaves_support_netpoll(bond_dev))
1989 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1990 read_unlock_bh(&bond->lock); 2028 read_unlock_bh(&bond->lock);
1991 if (slave_dev->netdev_ops->ndo_netpoll_cleanup) 2029 if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
1992 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); 2030 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
@@ -2019,8 +2057,8 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2019* First release a slave and than destroy the bond if no more slaves are left. 2057* First release a slave and than destroy the bond if no more slaves are left.
2020* Must be under rtnl_lock when this function is called. 2058* Must be under rtnl_lock when this function is called.
2021*/ 2059*/
2022int bond_release_and_destroy(struct net_device *bond_dev, 2060static int bond_release_and_destroy(struct net_device *bond_dev,
2023 struct net_device *slave_dev) 2061 struct net_device *slave_dev)
2024{ 2062{
2025 struct bonding *bond = netdev_priv(bond_dev); 2063 struct bonding *bond = netdev_priv(bond_dev);
2026 int ret; 2064 int ret;
@@ -2142,7 +2180,6 @@ static int bond_release_all(struct net_device *bond_dev)
2142 2180
2143out: 2181out:
2144 write_unlock_bh(&bond->lock); 2182 write_unlock_bh(&bond->lock);
2145
2146 return 0; 2183 return 0;
2147} 2184}
2148 2185
@@ -2191,9 +2228,11 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
2191 (old_active) && 2228 (old_active) &&
2192 (new_active->link == BOND_LINK_UP) && 2229 (new_active->link == BOND_LINK_UP) &&
2193 IS_UP(new_active->dev)) { 2230 IS_UP(new_active->dev)) {
2231 block_netpoll_tx();
2194 write_lock_bh(&bond->curr_slave_lock); 2232 write_lock_bh(&bond->curr_slave_lock);
2195 bond_change_active_slave(bond, new_active); 2233 bond_change_active_slave(bond, new_active);
2196 write_unlock_bh(&bond->curr_slave_lock); 2234 write_unlock_bh(&bond->curr_slave_lock);
2235 unblock_netpoll_tx();
2197 } else 2236 } else
2198 res = -EINVAL; 2237 res = -EINVAL;
2199 2238
@@ -2368,8 +2407,11 @@ static void bond_miimon_commit(struct bonding *bond)
2368 slave->state = BOND_STATE_BACKUP; 2407 slave->state = BOND_STATE_BACKUP;
2369 } 2408 }
2370 2409
2371 pr_info("%s: link status definitely up for interface %s.\n", 2410 bond_update_speed_duplex(slave);
2372 bond->dev->name, slave->dev->name); 2411
2412 pr_info("%s: link status definitely up for interface %s, %d Mbps %s duplex.\n",
2413 bond->dev->name, slave->dev->name,
2414 slave->speed, slave->duplex ? "full" : "half");
2373 2415
2374 /* notify ad that the link status has changed */ 2416 /* notify ad that the link status has changed */
2375 if (bond->params.mode == BOND_MODE_8023AD) 2417 if (bond->params.mode == BOND_MODE_8023AD)
@@ -2422,9 +2464,11 @@ static void bond_miimon_commit(struct bonding *bond)
2422 2464
2423do_failover: 2465do_failover:
2424 ASSERT_RTNL(); 2466 ASSERT_RTNL();
2467 block_netpoll_tx();
2425 write_lock_bh(&bond->curr_slave_lock); 2468 write_lock_bh(&bond->curr_slave_lock);
2426 bond_select_active_slave(bond); 2469 bond_select_active_slave(bond);
2427 write_unlock_bh(&bond->curr_slave_lock); 2470 write_unlock_bh(&bond->curr_slave_lock);
2471 unblock_netpoll_tx();
2428 } 2472 }
2429 2473
2430 bond_set_carrier(bond); 2474 bond_set_carrier(bond);
@@ -2867,11 +2911,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2867 } 2911 }
2868 2912
2869 if (do_failover) { 2913 if (do_failover) {
2914 block_netpoll_tx();
2870 write_lock_bh(&bond->curr_slave_lock); 2915 write_lock_bh(&bond->curr_slave_lock);
2871 2916
2872 bond_select_active_slave(bond); 2917 bond_select_active_slave(bond);
2873 2918
2874 write_unlock_bh(&bond->curr_slave_lock); 2919 write_unlock_bh(&bond->curr_slave_lock);
2920 unblock_netpoll_tx();
2875 } 2921 }
2876 2922
2877re_arm: 2923re_arm:
@@ -3030,9 +3076,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
3030 3076
3031do_failover: 3077do_failover:
3032 ASSERT_RTNL(); 3078 ASSERT_RTNL();
3079 block_netpoll_tx();
3033 write_lock_bh(&bond->curr_slave_lock); 3080 write_lock_bh(&bond->curr_slave_lock);
3034 bond_select_active_slave(bond); 3081 bond_select_active_slave(bond);
3035 write_unlock_bh(&bond->curr_slave_lock); 3082 write_unlock_bh(&bond->curr_slave_lock);
3083 unblock_netpoll_tx();
3036 } 3084 }
3037 3085
3038 bond_set_carrier(bond); 3086 bond_set_carrier(bond);
@@ -3312,6 +3360,8 @@ static void bond_info_show_slave(struct seq_file *seq,
3312 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); 3360 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
3313 seq_printf(seq, "MII Status: %s\n", 3361 seq_printf(seq, "MII Status: %s\n",
3314 (slave->link == BOND_LINK_UP) ? "up" : "down"); 3362 (slave->link == BOND_LINK_UP) ? "up" : "down");
3363 seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
3364 seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
3315 seq_printf(seq, "Link Failure Count: %u\n", 3365 seq_printf(seq, "Link Failure Count: %u\n",
3316 slave->link_failure_count); 3366 slave->link_failure_count);
3317 3367
@@ -3744,6 +3794,8 @@ static int bond_open(struct net_device *bond_dev)
3744 3794
3745 bond->kill_timers = 0; 3795 bond->kill_timers = 0;
3746 3796
3797 INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
3798
3747 if (bond_is_lb(bond)) { 3799 if (bond_is_lb(bond)) {
3748 /* bond_alb_initialize must be called before the timer 3800 /* bond_alb_initialize must be called before the timer
3749 * is started. 3801 * is started.
@@ -3828,6 +3880,8 @@ static int bond_close(struct net_device *bond_dev)
3828 break; 3880 break;
3829 } 3881 }
3830 3882
3883 if (delayed_work_pending(&bond->mcast_work))
3884 cancel_delayed_work(&bond->mcast_work);
3831 3885
3832 if (bond_is_lb(bond)) { 3886 if (bond_is_lb(bond)) {
3833 /* Must be called only after all 3887 /* Must be called only after all
@@ -4514,6 +4568,13 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4514{ 4568{
4515 struct bonding *bond = netdev_priv(dev); 4569 struct bonding *bond = netdev_priv(dev);
4516 4570
4571 /*
4572 * If we risk deadlock from transmitting this in the
4573 * netpoll path, tell netpoll to queue the frame for later tx
4574 */
4575 if (is_netpoll_tx_blocked(dev))
4576 return NETDEV_TX_BUSY;
4577
4517 if (TX_QUEUE_OVERRIDE(bond->params.mode)) { 4578 if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
4518 if (!bond_slave_override(bond, skb)) 4579 if (!bond_slave_override(bond, skb))
4519 return NETDEV_TX_OK; 4580 return NETDEV_TX_OK;
@@ -4678,6 +4739,10 @@ static void bond_setup(struct net_device *bond_dev)
4678 NETIF_F_HW_VLAN_RX | 4739 NETIF_F_HW_VLAN_RX |
4679 NETIF_F_HW_VLAN_FILTER); 4740 NETIF_F_HW_VLAN_FILTER);
4680 4741
4742 /* By default, we enable GRO on bonding devices.
4743 * Actual support requires lowlevel drivers are GRO ready.
4744 */
4745 bond_dev->features |= NETIF_F_GRO;
4681} 4746}
4682 4747
4683static void bond_work_cancel_all(struct bonding *bond) 4748static void bond_work_cancel_all(struct bonding *bond)
@@ -4699,6 +4764,9 @@ static void bond_work_cancel_all(struct bonding *bond)
4699 if (bond->params.mode == BOND_MODE_8023AD && 4764 if (bond->params.mode == BOND_MODE_8023AD &&
4700 delayed_work_pending(&bond->ad_work)) 4765 delayed_work_pending(&bond->ad_work))
4701 cancel_delayed_work(&bond->ad_work); 4766 cancel_delayed_work(&bond->ad_work);
4767
4768 if (delayed_work_pending(&bond->mcast_work))
4769 cancel_delayed_work(&bond->mcast_work);
4702} 4770}
4703 4771
4704/* 4772/*
@@ -4891,6 +4959,13 @@ static int bond_check_params(struct bond_params *params)
4891 all_slaves_active = 0; 4959 all_slaves_active = 0;
4892 } 4960 }
4893 4961
4962 if (resend_igmp < 0 || resend_igmp > 255) {
4963 pr_warning("Warning: resend_igmp (%d) should be between "
4964 "0 and 255, resetting to %d\n",
4965 resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4966 resend_igmp = BOND_DEFAULT_RESEND_IGMP;
4967 }
4968
4894 /* reset values for TLB/ALB */ 4969 /* reset values for TLB/ALB */
4895 if ((bond_mode == BOND_MODE_TLB) || 4970 if ((bond_mode == BOND_MODE_TLB) ||
4896 (bond_mode == BOND_MODE_ALB)) { 4971 (bond_mode == BOND_MODE_ALB)) {
@@ -5063,6 +5138,7 @@ static int bond_check_params(struct bond_params *params)
5063 params->fail_over_mac = fail_over_mac_value; 5138 params->fail_over_mac = fail_over_mac_value;
5064 params->tx_queues = tx_queues; 5139 params->tx_queues = tx_queues;
5065 params->all_slaves_active = all_slaves_active; 5140 params->all_slaves_active = all_slaves_active;
5141 params->resend_igmp = resend_igmp;
5066 5142
5067 if (primary) { 5143 if (primary) {
5068 strncpy(params->primary, primary, IFNAMSIZ); 5144 strncpy(params->primary, primary, IFNAMSIZ);
@@ -5164,6 +5240,15 @@ int bond_create(struct net *net, const char *name)
5164 res = dev_alloc_name(bond_dev, "bond%d"); 5240 res = dev_alloc_name(bond_dev, "bond%d");
5165 if (res < 0) 5241 if (res < 0)
5166 goto out; 5242 goto out;
5243 } else {
5244 /*
5245 * If we're given a name to register
5246 * we need to ensure that its not already
5247 * registered
5248 */
5249 res = -EEXIST;
5250 if (__dev_get_by_name(net, name) != NULL)
5251 goto out;
5167 } 5252 }
5168 5253
5169 res = register_netdevice(bond_dev); 5254 res = register_netdevice(bond_dev);
@@ -5212,6 +5297,13 @@ static int __init bonding_init(void)
5212 if (res) 5297 if (res)
5213 goto out; 5298 goto out;
5214 5299
5300#ifdef CONFIG_NET_POLL_CONTROLLER
5301 if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) {
5302 res = -ENOMEM;
5303 goto out;
5304 }
5305#endif
5306
5215 res = register_pernet_subsys(&bond_net_ops); 5307 res = register_pernet_subsys(&bond_net_ops);
5216 if (res) 5308 if (res)
5217 goto out; 5309 goto out;
@@ -5230,6 +5322,7 @@ static int __init bonding_init(void)
5230 if (res) 5322 if (res)
5231 goto err; 5323 goto err;
5232 5324
5325
5233 register_netdevice_notifier(&bond_netdev_notifier); 5326 register_netdevice_notifier(&bond_netdev_notifier);
5234 register_inetaddr_notifier(&bond_inetaddr_notifier); 5327 register_inetaddr_notifier(&bond_inetaddr_notifier);
5235 bond_register_ipv6_notifier(); 5328 bond_register_ipv6_notifier();
@@ -5239,6 +5332,9 @@ err:
5239 rtnl_link_unregister(&bond_link_ops); 5332 rtnl_link_unregister(&bond_link_ops);
5240err_link: 5333err_link:
5241 unregister_pernet_subsys(&bond_net_ops); 5334 unregister_pernet_subsys(&bond_net_ops);
5335#ifdef CONFIG_NET_POLL_CONTROLLER
5336 free_cpumask_var(netpoll_block_tx);
5337#endif
5242 goto out; 5338 goto out;
5243 5339
5244} 5340}
@@ -5253,6 +5349,10 @@ static void __exit bonding_exit(void)
5253 5349
5254 rtnl_link_unregister(&bond_link_ops); 5350 rtnl_link_unregister(&bond_link_ops);
5255 unregister_pernet_subsys(&bond_net_ops); 5351 unregister_pernet_subsys(&bond_net_ops);
5352
5353#ifdef CONFIG_NET_POLL_CONTROLLER
5354 free_cpumask_var(netpoll_block_tx);
5355#endif
5256} 5356}
5257 5357
5258module_init(bonding_init); 5358module_init(bonding_init);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c311aed9bd02..8fd0174c5380 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1066,6 +1066,7 @@ static ssize_t bonding_store_primary(struct device *d,
1066 1066
1067 if (!rtnl_trylock()) 1067 if (!rtnl_trylock())
1068 return restart_syscall(); 1068 return restart_syscall();
1069 block_netpoll_tx();
1069 read_lock(&bond->lock); 1070 read_lock(&bond->lock);
1070 write_lock_bh(&bond->curr_slave_lock); 1071 write_lock_bh(&bond->curr_slave_lock);
1071 1072
@@ -1101,6 +1102,7 @@ static ssize_t bonding_store_primary(struct device *d,
1101out: 1102out:
1102 write_unlock_bh(&bond->curr_slave_lock); 1103 write_unlock_bh(&bond->curr_slave_lock);
1103 read_unlock(&bond->lock); 1104 read_unlock(&bond->lock);
1105 unblock_netpoll_tx();
1104 rtnl_unlock(); 1106 rtnl_unlock();
1105 1107
1106 return count; 1108 return count;
@@ -1146,11 +1148,13 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
1146 bond->dev->name, pri_reselect_tbl[new_value].modename, 1148 bond->dev->name, pri_reselect_tbl[new_value].modename,
1147 new_value); 1149 new_value);
1148 1150
1151 block_netpoll_tx();
1149 read_lock(&bond->lock); 1152 read_lock(&bond->lock);
1150 write_lock_bh(&bond->curr_slave_lock); 1153 write_lock_bh(&bond->curr_slave_lock);
1151 bond_select_active_slave(bond); 1154 bond_select_active_slave(bond);
1152 write_unlock_bh(&bond->curr_slave_lock); 1155 write_unlock_bh(&bond->curr_slave_lock);
1153 read_unlock(&bond->lock); 1156 read_unlock(&bond->lock);
1157 unblock_netpoll_tx();
1154out: 1158out:
1155 rtnl_unlock(); 1159 rtnl_unlock();
1156 return ret; 1160 return ret;
@@ -1232,6 +1236,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
1232 1236
1233 if (!rtnl_trylock()) 1237 if (!rtnl_trylock())
1234 return restart_syscall(); 1238 return restart_syscall();
1239
1240 block_netpoll_tx();
1235 read_lock(&bond->lock); 1241 read_lock(&bond->lock);
1236 write_lock_bh(&bond->curr_slave_lock); 1242 write_lock_bh(&bond->curr_slave_lock);
1237 1243
@@ -1288,6 +1294,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
1288 out: 1294 out:
1289 write_unlock_bh(&bond->curr_slave_lock); 1295 write_unlock_bh(&bond->curr_slave_lock);
1290 read_unlock(&bond->lock); 1296 read_unlock(&bond->lock);
1297 unblock_netpoll_tx();
1298
1291 rtnl_unlock(); 1299 rtnl_unlock();
1292 1300
1293 return count; 1301 return count;
@@ -1592,6 +1600,49 @@ out:
1592static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, 1600static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
1593 bonding_show_slaves_active, bonding_store_slaves_active); 1601 bonding_show_slaves_active, bonding_store_slaves_active);
1594 1602
1603/*
1604 * Show and set the number of IGMP membership reports to send on link failure
1605 */
1606static ssize_t bonding_show_resend_igmp(struct device *d,
1607 struct device_attribute *attr,
1608 char *buf)
1609{
1610 struct bonding *bond = to_bond(d);
1611
1612 return sprintf(buf, "%d\n", bond->params.resend_igmp);
1613}
1614
1615static ssize_t bonding_store_resend_igmp(struct device *d,
1616 struct device_attribute *attr,
1617 const char *buf, size_t count)
1618{
1619 int new_value, ret = count;
1620 struct bonding *bond = to_bond(d);
1621
1622 if (sscanf(buf, "%d", &new_value) != 1) {
1623 pr_err("%s: no resend_igmp value specified.\n",
1624 bond->dev->name);
1625 ret = -EINVAL;
1626 goto out;
1627 }
1628
1629 if (new_value < 0) {
1630 pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
1631 bond->dev->name, new_value);
1632 ret = -EINVAL;
1633 goto out;
1634 }
1635
1636 pr_info("%s: Setting resend_igmp to %d.\n",
1637 bond->dev->name, new_value);
1638 bond->params.resend_igmp = new_value;
1639out:
1640 return ret;
1641}
1642
1643static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
1644 bonding_show_resend_igmp, bonding_store_resend_igmp);
1645
1595static struct attribute *per_bond_attrs[] = { 1646static struct attribute *per_bond_attrs[] = {
1596 &dev_attr_slaves.attr, 1647 &dev_attr_slaves.attr,
1597 &dev_attr_mode.attr, 1648 &dev_attr_mode.attr,
@@ -1619,6 +1670,7 @@ static struct attribute *per_bond_attrs[] = {
1619 &dev_attr_ad_partner_mac.attr, 1670 &dev_attr_ad_partner_mac.attr,
1620 &dev_attr_queue_id.attr, 1671 &dev_attr_queue_id.attr,
1621 &dev_attr_all_slaves_active.attr, 1672 &dev_attr_all_slaves_active.attr,
1673 &dev_attr_resend_igmp.attr,
1622 NULL, 1674 NULL,
1623}; 1675};
1624 1676
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index c6fdd851579a..4eedb12df6ca 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -19,6 +19,7 @@
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/kobject.h> 21#include <linux/kobject.h>
22#include <linux/cpumask.h>
22#include <linux/in6.h> 23#include <linux/in6.h>
23#include "bond_3ad.h" 24#include "bond_3ad.h"
24#include "bond_alb.h" 25#include "bond_alb.h"
@@ -117,6 +118,35 @@
117 bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave) 118 bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
118 119
119 120
121#ifdef CONFIG_NET_POLL_CONTROLLER
122extern cpumask_var_t netpoll_block_tx;
123
124static inline void block_netpoll_tx(void)
125{
126 preempt_disable();
127 BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(),
128 netpoll_block_tx));
129}
130
131static inline void unblock_netpoll_tx(void)
132{
133 BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(),
134 netpoll_block_tx));
135 preempt_enable();
136}
137
138static inline int is_netpoll_tx_blocked(struct net_device *dev)
139{
140 if (unlikely(dev->priv_flags & IFF_IN_NETPOLL))
141 return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx);
142 return 0;
143}
144#else
145#define block_netpoll_tx()
146#define unblock_netpoll_tx()
147#define is_netpoll_tx_blocked(dev) (0)
148#endif
149
120struct bond_params { 150struct bond_params {
121 int mode; 151 int mode;
122 int xmit_policy; 152 int xmit_policy;
@@ -136,6 +166,7 @@ struct bond_params {
136 __be32 arp_targets[BOND_MAX_ARP_TARGETS]; 166 __be32 arp_targets[BOND_MAX_ARP_TARGETS];
137 int tx_queues; 167 int tx_queues;
138 int all_slaves_active; 168 int all_slaves_active;
169 int resend_igmp;
139}; 170};
140 171
141struct bond_parm_tbl { 172struct bond_parm_tbl {
@@ -202,6 +233,7 @@ struct bonding {
202 s8 send_grat_arp; 233 s8 send_grat_arp;
203 s8 send_unsol_na; 234 s8 send_unsol_na;
204 s8 setup_by_slave; 235 s8 setup_by_slave;
236 s8 igmp_retrans;
205#ifdef CONFIG_PROC_FS 237#ifdef CONFIG_PROC_FS
206 struct proc_dir_entry *proc_entry; 238 struct proc_dir_entry *proc_entry;
207 char proc_file_name[IFNAMSIZ]; 239 char proc_file_name[IFNAMSIZ];
@@ -223,6 +255,7 @@ struct bonding {
223 struct delayed_work arp_work; 255 struct delayed_work arp_work;
224 struct delayed_work alb_work; 256 struct delayed_work alb_work;
225 struct delayed_work ad_work; 257 struct delayed_work ad_work;
258 struct delayed_work mcast_work;
226#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 259#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
227 struct in6_addr master_ipv6; 260 struct in6_addr master_ipv6;
228#endif 261#endif
@@ -331,7 +364,6 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
331struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 364struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
332int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 365int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
333int bond_create(struct net *net, const char *name); 366int bond_create(struct net *net, const char *name);
334int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
335int bond_create_sysfs(void); 367int bond_create_sysfs(void);
336void bond_destroy_sysfs(void); 368void bond_destroy_sysfs(void);
337void bond_prepare_sysfs_group(struct bonding *bond); 369void bond_prepare_sysfs_group(struct bonding *bond);
diff --git a/drivers/net/bsd_comp.c b/drivers/net/bsd_comp.c
index 88edb986691a..6e99d80ec409 100644
--- a/drivers/net/bsd_comp.c
+++ b/drivers/net/bsd_comp.c
@@ -429,7 +429,7 @@ static void *bsd_alloc (unsigned char *options, int opt_len, int decomp)
429 if (!db->lens) 429 if (!db->lens)
430 { 430 {
431 bsd_free (db); 431 bsd_free (db);
432 return (NULL); 432 return NULL;
433 } 433 }
434 } 434 }
435/* 435/*
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b11a0cb5ed81..6aadc3e32bd5 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -38,14 +38,14 @@
38 * static struct mcp251x_platform_data mcp251x_info = { 38 * static struct mcp251x_platform_data mcp251x_info = {
39 * .oscillator_frequency = 8000000, 39 * .oscillator_frequency = 8000000,
40 * .board_specific_setup = &mcp251x_setup, 40 * .board_specific_setup = &mcp251x_setup,
41 * .model = CAN_MCP251X_MCP2510,
42 * .power_enable = mcp251x_power_enable, 41 * .power_enable = mcp251x_power_enable,
43 * .transceiver_enable = NULL, 42 * .transceiver_enable = NULL,
44 * }; 43 * };
45 * 44 *
46 * static struct spi_board_info spi_board_info[] = { 45 * static struct spi_board_info spi_board_info[] = {
47 * { 46 * {
48 * .modalias = "mcp251x", 47 * .modalias = "mcp2510",
48 * // or "mcp2515" depending on your controller
49 * .platform_data = &mcp251x_info, 49 * .platform_data = &mcp251x_info,
50 * .irq = IRQ_EINT13, 50 * .irq = IRQ_EINT13,
51 * .max_speed_hz = 2*1000*1000, 51 * .max_speed_hz = 2*1000*1000,
@@ -125,6 +125,9 @@
125# define CANINTF_TX0IF 0x04 125# define CANINTF_TX0IF 0x04
126# define CANINTF_RX1IF 0x02 126# define CANINTF_RX1IF 0x02
127# define CANINTF_RX0IF 0x01 127# define CANINTF_RX0IF 0x01
128# define CANINTF_RX (CANINTF_RX0IF | CANINTF_RX1IF)
129# define CANINTF_TX (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)
130# define CANINTF_ERR (CANINTF_ERRIF)
128#define EFLG 0x2d 131#define EFLG 0x2d
129# define EFLG_EWARN 0x01 132# define EFLG_EWARN 0x01
130# define EFLG_RXWAR 0x02 133# define EFLG_RXWAR 0x02
@@ -222,10 +225,16 @@ static struct can_bittiming_const mcp251x_bittiming_const = {
222 .brp_inc = 1, 225 .brp_inc = 1,
223}; 226};
224 227
228enum mcp251x_model {
229 CAN_MCP251X_MCP2510 = 0x2510,
230 CAN_MCP251X_MCP2515 = 0x2515,
231};
232
225struct mcp251x_priv { 233struct mcp251x_priv {
226 struct can_priv can; 234 struct can_priv can;
227 struct net_device *net; 235 struct net_device *net;
228 struct spi_device *spi; 236 struct spi_device *spi;
237 enum mcp251x_model model;
229 238
230 struct mutex mcp_lock; /* SPI device lock */ 239 struct mutex mcp_lock; /* SPI device lock */
231 240
@@ -250,6 +259,16 @@ struct mcp251x_priv {
250 int restart_tx; 259 int restart_tx;
251}; 260};
252 261
262#define MCP251X_IS(_model) \
263static inline int mcp251x_is_##_model(struct spi_device *spi) \
264{ \
265 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \
266 return priv->model == CAN_MCP251X_MCP##_model; \
267}
268
269MCP251X_IS(2510);
270MCP251X_IS(2515);
271
253static void mcp251x_clean(struct net_device *net) 272static void mcp251x_clean(struct net_device *net)
254{ 273{
255 struct mcp251x_priv *priv = netdev_priv(net); 274 struct mcp251x_priv *priv = netdev_priv(net);
@@ -319,6 +338,20 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
319 return val; 338 return val;
320} 339}
321 340
341static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
342 uint8_t *v1, uint8_t *v2)
343{
344 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
345
346 priv->spi_tx_buf[0] = INSTRUCTION_READ;
347 priv->spi_tx_buf[1] = reg;
348
349 mcp251x_spi_trans(spi, 4);
350
351 *v1 = priv->spi_rx_buf[2];
352 *v2 = priv->spi_rx_buf[3];
353}
354
322static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val) 355static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
323{ 356{
324 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 357 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
@@ -346,10 +379,9 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
346static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, 379static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
347 int len, int tx_buf_idx) 380 int len, int tx_buf_idx)
348{ 381{
349 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
350 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 382 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
351 383
352 if (pdata->model == CAN_MCP251X_MCP2510) { 384 if (mcp251x_is_2510(spi)) {
353 int i; 385 int i;
354 386
355 for (i = 1; i < TXBDAT_OFF + len; i++) 387 for (i = 1; i < TXBDAT_OFF + len; i++)
@@ -392,9 +424,8 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
392 int buf_idx) 424 int buf_idx)
393{ 425{
394 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 426 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
395 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
396 427
397 if (pdata->model == CAN_MCP251X_MCP2510) { 428 if (mcp251x_is_2510(spi)) {
398 int i, len; 429 int i, len;
399 430
400 for (i = 1; i < RXBDAT_OFF; i++) 431 for (i = 1; i < RXBDAT_OFF; i++)
@@ -451,7 +482,7 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
451 482
452 priv->net->stats.rx_packets++; 483 priv->net->stats.rx_packets++;
453 priv->net->stats.rx_bytes += frame->can_dlc; 484 priv->net->stats.rx_bytes += frame->can_dlc;
454 netif_rx(skb); 485 netif_rx_ni(skb);
455} 486}
456 487
457static void mcp251x_hw_sleep(struct spi_device *spi) 488static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -674,9 +705,9 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
674 705
675 skb = alloc_can_err_skb(net, &frame); 706 skb = alloc_can_err_skb(net, &frame);
676 if (skb) { 707 if (skb) {
677 frame->can_id = can_id; 708 frame->can_id |= can_id;
678 frame->data[1] = data1; 709 frame->data[1] = data1;
679 netif_rx(skb); 710 netif_rx_ni(skb);
680 } else { 711 } else {
681 dev_err(&net->dev, 712 dev_err(&net->dev,
682 "cannot allocate error skb\n"); 713 "cannot allocate error skb\n");
@@ -754,24 +785,42 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
754 mutex_lock(&priv->mcp_lock); 785 mutex_lock(&priv->mcp_lock);
755 while (!priv->force_quit) { 786 while (!priv->force_quit) {
756 enum can_state new_state; 787 enum can_state new_state;
757 u8 intf = mcp251x_read_reg(spi, CANINTF); 788 u8 intf, eflag;
758 u8 eflag; 789 u8 clear_intf = 0;
759 int can_id = 0, data1 = 0; 790 int can_id = 0, data1 = 0;
760 791
792 mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
793
794 /* mask out flags we don't care about */
795 intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
796
797 /* receive buffer 0 */
761 if (intf & CANINTF_RX0IF) { 798 if (intf & CANINTF_RX0IF) {
762 mcp251x_hw_rx(spi, 0); 799 mcp251x_hw_rx(spi, 0);
763 /* Free one buffer ASAP */ 800 /*
764 mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF, 801 * Free one buffer ASAP
765 0x00); 802 * (The MCP2515 does this automatically.)
803 */
804 if (mcp251x_is_2510(spi))
805 mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
766 } 806 }
767 807
768 if (intf & CANINTF_RX1IF) 808 /* receive buffer 1 */
809 if (intf & CANINTF_RX1IF) {
769 mcp251x_hw_rx(spi, 1); 810 mcp251x_hw_rx(spi, 1);
811 /* the MCP2515 does this automatically */
812 if (mcp251x_is_2510(spi))
813 clear_intf |= CANINTF_RX1IF;
814 }
770 815
771 mcp251x_write_bits(spi, CANINTF, intf, 0x00); 816 /* any error or tx interrupt we need to clear? */
817 if (intf & (CANINTF_ERR | CANINTF_TX))
818 clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
819 if (clear_intf)
820 mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
772 821
773 eflag = mcp251x_read_reg(spi, EFLG); 822 if (eflag)
774 mcp251x_write_reg(spi, EFLG, 0x00); 823 mcp251x_write_bits(spi, EFLG, eflag, 0x00);
775 824
776 /* Update can state */ 825 /* Update can state */
777 if (eflag & EFLG_TXBO) { 826 if (eflag & EFLG_TXBO) {
@@ -816,10 +865,14 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
816 if (intf & CANINTF_ERRIF) { 865 if (intf & CANINTF_ERRIF) {
817 /* Handle overflow counters */ 866 /* Handle overflow counters */
818 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) { 867 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
819 if (eflag & EFLG_RX0OVR) 868 if (eflag & EFLG_RX0OVR) {
820 net->stats.rx_over_errors++; 869 net->stats.rx_over_errors++;
821 if (eflag & EFLG_RX1OVR) 870 net->stats.rx_errors++;
871 }
872 if (eflag & EFLG_RX1OVR) {
822 net->stats.rx_over_errors++; 873 net->stats.rx_over_errors++;
874 net->stats.rx_errors++;
875 }
823 can_id |= CAN_ERR_CRTL; 876 can_id |= CAN_ERR_CRTL;
824 data1 |= CAN_ERR_CRTL_RX_OVERFLOW; 877 data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
825 } 878 }
@@ -838,7 +891,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
838 if (intf == 0) 891 if (intf == 0)
839 break; 892 break;
840 893
841 if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) { 894 if (intf & CANINTF_TX) {
842 net->stats.tx_packets++; 895 net->stats.tx_packets++;
843 net->stats.tx_bytes += priv->tx_len - 1; 896 net->stats.tx_bytes += priv->tx_len - 1;
844 if (priv->tx_len) { 897 if (priv->tx_len) {
@@ -921,16 +974,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
921 struct net_device *net; 974 struct net_device *net;
922 struct mcp251x_priv *priv; 975 struct mcp251x_priv *priv;
923 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 976 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
924 int model = spi_get_device_id(spi)->driver_data;
925 int ret = -ENODEV; 977 int ret = -ENODEV;
926 978
927 if (!pdata) 979 if (!pdata)
928 /* Platform data is required for osc freq */ 980 /* Platform data is required for osc freq */
929 goto error_out; 981 goto error_out;
930 982
931 if (model)
932 pdata->model = model;
933
934 /* Allocate can/net device */ 983 /* Allocate can/net device */
935 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); 984 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
936 if (!net) { 985 if (!net) {
@@ -947,6 +996,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
947 priv->can.clock.freq = pdata->oscillator_frequency / 2; 996 priv->can.clock.freq = pdata->oscillator_frequency / 2;
948 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | 997 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
949 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; 998 CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
999 priv->model = spi_get_device_id(spi)->driver_data;
950 priv->net = net; 1000 priv->net = net;
951 dev_set_drvdata(&spi->dev, priv); 1001 dev_set_drvdata(&spi->dev, priv);
952 1002
@@ -1120,8 +1170,7 @@ static int mcp251x_can_resume(struct spi_device *spi)
1120#define mcp251x_can_resume NULL 1170#define mcp251x_can_resume NULL
1121#endif 1171#endif
1122 1172
1123static struct spi_device_id mcp251x_id_table[] = { 1173static const struct spi_device_id mcp251x_id_table[] = {
1124 { "mcp251x", 0 /* Use pdata.model */ },
1125 { "mcp2510", CAN_MCP251X_MCP2510 }, 1174 { "mcp2510", CAN_MCP251X_MCP2510 },
1126 { "mcp2515", CAN_MCP251X_MCP2515 }, 1175 { "mcp2515", CAN_MCP251X_MCP2515 },
1127 { }, 1176 { },
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 32aaadc4734f..d6b6d6aa565a 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -419,7 +419,7 @@ static u16 cas_phy_read(struct cas *cp, int reg)
419 udelay(10); 419 udelay(10);
420 cmd = readl(cp->regs + REG_MIF_FRAME); 420 cmd = readl(cp->regs + REG_MIF_FRAME);
421 if (cmd & MIF_FRAME_TURN_AROUND_LSB) 421 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
422 return (cmd & MIF_FRAME_DATA_MASK); 422 return cmd & MIF_FRAME_DATA_MASK;
423 } 423 }
424 return 0xFFFF; /* -1 */ 424 return 0xFFFF; /* -1 */
425} 425}
@@ -804,7 +804,7 @@ static int cas_reset_mii_phy(struct cas *cp)
804 break; 804 break;
805 udelay(10); 805 udelay(10);
806 } 806 }
807 return (limit <= 0); 807 return limit <= 0;
808} 808}
809 809
810static int cas_saturn_firmware_init(struct cas *cp) 810static int cas_saturn_firmware_init(struct cas *cp)
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 1950b9a20ecd..70221ca32683 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1551,7 +1551,7 @@ static inline int responses_pending(const struct adapter *adapter)
1551 const struct respQ *Q = &adapter->sge->respQ; 1551 const struct respQ *Q = &adapter->sge->respQ;
1552 const struct respQ_e *e = &Q->entries[Q->cidx]; 1552 const struct respQ_e *e = &Q->entries[Q->cidx];
1553 1553
1554 return (e->GenerationBit == Q->genbit); 1554 return e->GenerationBit == Q->genbit;
1555} 1555}
1556 1556
1557/* 1557/*
@@ -1870,7 +1870,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1870 cpl->iff = dev->if_port; 1870 cpl->iff = dev->if_port;
1871 1871
1872#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1872#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1873 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { 1873 if (vlan_tx_tag_present(skb)) {
1874 cpl->vlan_valid = 1; 1874 cpl->vlan_valid = 1;
1875 cpl->vlan = htons(vlan_tx_tag_get(skb)); 1875 cpl->vlan = htons(vlan_tx_tag_get(skb));
1876 st->vlan_insert++; 1876 st->vlan_insert++;
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index c844111cffeb..106a590f0d9a 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -255,7 +255,7 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
255 else if ((result & (1 << 8)) != 0x0) 255 else if ((result & (1 << 8)) != 0x0)
256 pr_err("bist read error: 0x%x\n", result); 256 pr_err("bist read error: 0x%x\n", result);
257 257
258 return (result & 0xff); 258 return result & 0xff;
259} 259}
260 260
261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value) 261static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 2ab6a7c4ffc1..92bac19ad60a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -60,6 +60,7 @@ MODULE_LICENSE("GPL");
60MODULE_VERSION(CNIC_MODULE_VERSION); 60MODULE_VERSION(CNIC_MODULE_VERSION);
61 61
62static LIST_HEAD(cnic_dev_list); 62static LIST_HEAD(cnic_dev_list);
63static LIST_HEAD(cnic_udev_list);
63static DEFINE_RWLOCK(cnic_dev_lock); 64static DEFINE_RWLOCK(cnic_dev_lock);
64static DEFINE_MUTEX(cnic_lock); 65static DEFINE_MUTEX(cnic_lock);
65 66
@@ -81,29 +82,34 @@ static struct cnic_ops cnic_bnx2x_ops = {
81 .cnic_ctl = cnic_ctl, 82 .cnic_ctl = cnic_ctl,
82}; 83};
83 84
85static struct workqueue_struct *cnic_wq;
86
84static void cnic_shutdown_rings(struct cnic_dev *); 87static void cnic_shutdown_rings(struct cnic_dev *);
85static void cnic_init_rings(struct cnic_dev *); 88static void cnic_init_rings(struct cnic_dev *);
86static int cnic_cm_set_pg(struct cnic_sock *); 89static int cnic_cm_set_pg(struct cnic_sock *);
87 90
88static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) 91static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
89{ 92{
90 struct cnic_dev *dev = uinfo->priv; 93 struct cnic_uio_dev *udev = uinfo->priv;
91 struct cnic_local *cp = dev->cnic_priv; 94 struct cnic_dev *dev;
92 95
93 if (!capable(CAP_NET_ADMIN)) 96 if (!capable(CAP_NET_ADMIN))
94 return -EPERM; 97 return -EPERM;
95 98
96 if (cp->uio_dev != -1) 99 if (udev->uio_dev != -1)
97 return -EBUSY; 100 return -EBUSY;
98 101
99 rtnl_lock(); 102 rtnl_lock();
100 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 103 dev = udev->dev;
104
105 if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
101 rtnl_unlock(); 106 rtnl_unlock();
102 return -ENODEV; 107 return -ENODEV;
103 } 108 }
104 109
105 cp->uio_dev = iminor(inode); 110 udev->uio_dev = iminor(inode);
106 111
112 cnic_shutdown_rings(dev);
107 cnic_init_rings(dev); 113 cnic_init_rings(dev);
108 rtnl_unlock(); 114 rtnl_unlock();
109 115
@@ -112,12 +118,9 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
112 118
113static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) 119static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
114{ 120{
115 struct cnic_dev *dev = uinfo->priv; 121 struct cnic_uio_dev *udev = uinfo->priv;
116 struct cnic_local *cp = dev->cnic_priv;
117
118 cnic_shutdown_rings(dev);
119 122
120 cp->uio_dev = -1; 123 udev->uio_dev = -1;
121 return 0; 124 return 0;
122} 125}
123 126
@@ -242,14 +245,14 @@ static int cnic_in_use(struct cnic_sock *csk)
242 return test_bit(SK_F_INUSE, &csk->flags); 245 return test_bit(SK_F_INUSE, &csk->flags);
243} 246}
244 247
245static void cnic_kwq_completion(struct cnic_dev *dev, u32 count) 248static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
246{ 249{
247 struct cnic_local *cp = dev->cnic_priv; 250 struct cnic_local *cp = dev->cnic_priv;
248 struct cnic_eth_dev *ethdev = cp->ethdev; 251 struct cnic_eth_dev *ethdev = cp->ethdev;
249 struct drv_ctl_info info; 252 struct drv_ctl_info info;
250 253
251 info.cmd = DRV_CTL_COMPLETION_CMD; 254 info.cmd = cmd;
252 info.data.comp.comp_count = count; 255 info.data.credit.credit_count = count;
253 ethdev->drv_ctl(dev->netdev, &info); 256 ethdev->drv_ctl(dev->netdev, &info);
254} 257}
255 258
@@ -274,8 +277,9 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
274 u16 len = 0; 277 u16 len = 0;
275 u32 msg_type = ISCSI_KEVENT_IF_DOWN; 278 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
276 struct cnic_ulp_ops *ulp_ops; 279 struct cnic_ulp_ops *ulp_ops;
280 struct cnic_uio_dev *udev = cp->udev;
277 281
278 if (cp->uio_dev == -1) 282 if (!udev || udev->uio_dev == -1)
279 return -ENODEV; 283 return -ENODEV;
280 284
281 if (csk) { 285 if (csk) {
@@ -406,8 +410,7 @@ static void cnic_uio_stop(void)
406 list_for_each_entry(dev, &cnic_dev_list, list) { 410 list_for_each_entry(dev, &cnic_dev_list, list) {
407 struct cnic_local *cp = dev->cnic_priv; 411 struct cnic_local *cp = dev->cnic_priv;
408 412
409 if (cp->cnic_uinfo) 413 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
410 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
411 } 414 }
412 read_unlock(&cnic_dev_lock); 415 read_unlock(&cnic_dev_lock);
413} 416}
@@ -768,31 +771,45 @@ static void cnic_free_context(struct cnic_dev *dev)
768 } 771 }
769} 772}
770 773
771static void cnic_free_resc(struct cnic_dev *dev) 774static void __cnic_free_uio(struct cnic_uio_dev *udev)
772{ 775{
773 struct cnic_local *cp = dev->cnic_priv; 776 uio_unregister_device(&udev->cnic_uinfo);
774 int i = 0;
775 777
776 if (cp->cnic_uinfo) { 778 if (udev->l2_buf) {
777 while (cp->uio_dev != -1 && i < 15) { 779 dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
778 msleep(100); 780 udev->l2_buf, udev->l2_buf_map);
779 i++; 781 udev->l2_buf = NULL;
780 }
781 uio_unregister_device(cp->cnic_uinfo);
782 kfree(cp->cnic_uinfo);
783 cp->cnic_uinfo = NULL;
784 } 782 }
785 783
786 if (cp->l2_buf) { 784 if (udev->l2_ring) {
787 dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size, 785 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
788 cp->l2_buf, cp->l2_buf_map); 786 udev->l2_ring, udev->l2_ring_map);
789 cp->l2_buf = NULL; 787 udev->l2_ring = NULL;
790 } 788 }
791 789
792 if (cp->l2_ring) { 790 pci_dev_put(udev->pdev);
793 dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size, 791 kfree(udev);
794 cp->l2_ring, cp->l2_ring_map); 792}
795 cp->l2_ring = NULL; 793
794static void cnic_free_uio(struct cnic_uio_dev *udev)
795{
796 if (!udev)
797 return;
798
799 write_lock(&cnic_dev_lock);
800 list_del_init(&udev->list);
801 write_unlock(&cnic_dev_lock);
802 __cnic_free_uio(udev);
803}
804
805static void cnic_free_resc(struct cnic_dev *dev)
806{
807 struct cnic_local *cp = dev->cnic_priv;
808 struct cnic_uio_dev *udev = cp->udev;
809
810 if (udev) {
811 udev->dev = NULL;
812 cp->udev = NULL;
796 } 813 }
797 814
798 cnic_free_context(dev); 815 cnic_free_context(dev);
@@ -894,37 +911,68 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
894 return 0; 911 return 0;
895} 912}
896 913
897static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) 914static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
898{ 915{
899 struct cnic_local *cp = dev->cnic_priv; 916 struct cnic_local *cp = dev->cnic_priv;
917 struct cnic_uio_dev *udev;
918
919 read_lock(&cnic_dev_lock);
920 list_for_each_entry(udev, &cnic_udev_list, list) {
921 if (udev->pdev == dev->pcidev) {
922 udev->dev = dev;
923 cp->udev = udev;
924 read_unlock(&cnic_dev_lock);
925 return 0;
926 }
927 }
928 read_unlock(&cnic_dev_lock);
929
930 udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
931 if (!udev)
932 return -ENOMEM;
900 933
901 cp->l2_ring_size = pages * BCM_PAGE_SIZE; 934 udev->uio_dev = -1;
902 cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size, 935
903 &cp->l2_ring_map, 936 udev->dev = dev;
904 GFP_KERNEL | __GFP_COMP); 937 udev->pdev = dev->pcidev;
905 if (!cp->l2_ring) 938 udev->l2_ring_size = pages * BCM_PAGE_SIZE;
939 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
940 &udev->l2_ring_map,
941 GFP_KERNEL | __GFP_COMP);
942 if (!udev->l2_ring)
906 return -ENOMEM; 943 return -ENOMEM;
907 944
908 cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 945 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
909 cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); 946 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
910 cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size, 947 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
911 &cp->l2_buf_map, 948 &udev->l2_buf_map,
912 GFP_KERNEL | __GFP_COMP); 949 GFP_KERNEL | __GFP_COMP);
913 if (!cp->l2_buf) 950 if (!udev->l2_buf)
914 return -ENOMEM; 951 return -ENOMEM;
915 952
953 write_lock(&cnic_dev_lock);
954 list_add(&udev->list, &cnic_udev_list);
955 write_unlock(&cnic_dev_lock);
956
957 pci_dev_get(udev->pdev);
958
959 cp->udev = udev;
960
916 return 0; 961 return 0;
917} 962}
918 963
919static int cnic_alloc_uio(struct cnic_dev *dev) { 964static int cnic_init_uio(struct cnic_dev *dev)
965{
920 struct cnic_local *cp = dev->cnic_priv; 966 struct cnic_local *cp = dev->cnic_priv;
967 struct cnic_uio_dev *udev = cp->udev;
921 struct uio_info *uinfo; 968 struct uio_info *uinfo;
922 int ret; 969 int ret = 0;
923 970
924 uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC); 971 if (!udev)
925 if (!uinfo)
926 return -ENOMEM; 972 return -ENOMEM;
927 973
974 uinfo = &udev->cnic_uinfo;
975
928 uinfo->mem[0].addr = dev->netdev->base_addr; 976 uinfo->mem[0].addr = dev->netdev->base_addr;
929 uinfo->mem[0].internal_addr = dev->regview; 977 uinfo->mem[0].internal_addr = dev->regview;
930 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; 978 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
@@ -932,7 +980,7 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
932 980
933 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 981 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
934 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & 982 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
935 PAGE_MASK; 983 PAGE_MASK;
936 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 984 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
937 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 985 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
938 else 986 else
@@ -942,19 +990,19 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
942 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 990 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
943 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 991 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
944 PAGE_MASK; 992 PAGE_MASK;
945 uinfo->mem[1].size = sizeof(struct host_def_status_block); 993 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
946 994
947 uinfo->name = "bnx2x_cnic"; 995 uinfo->name = "bnx2x_cnic";
948 } 996 }
949 997
950 uinfo->mem[1].memtype = UIO_MEM_LOGICAL; 998 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
951 999
952 uinfo->mem[2].addr = (unsigned long) cp->l2_ring; 1000 uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
953 uinfo->mem[2].size = cp->l2_ring_size; 1001 uinfo->mem[2].size = udev->l2_ring_size;
954 uinfo->mem[2].memtype = UIO_MEM_LOGICAL; 1002 uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
955 1003
956 uinfo->mem[3].addr = (unsigned long) cp->l2_buf; 1004 uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
957 uinfo->mem[3].size = cp->l2_buf_size; 1005 uinfo->mem[3].size = udev->l2_buf_size;
958 uinfo->mem[3].memtype = UIO_MEM_LOGICAL; 1006 uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
959 1007
960 uinfo->version = CNIC_MODULE_VERSION; 1008 uinfo->version = CNIC_MODULE_VERSION;
@@ -963,16 +1011,17 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
963 uinfo->open = cnic_uio_open; 1011 uinfo->open = cnic_uio_open;
964 uinfo->release = cnic_uio_close; 1012 uinfo->release = cnic_uio_close;
965 1013
966 uinfo->priv = dev; 1014 if (udev->uio_dev == -1) {
1015 if (!uinfo->priv) {
1016 uinfo->priv = udev;
967 1017
968 ret = uio_register_device(&dev->pcidev->dev, uinfo); 1018 ret = uio_register_device(&udev->pdev->dev, uinfo);
969 if (ret) { 1019 }
970 kfree(uinfo); 1020 } else {
971 return ret; 1021 cnic_init_rings(dev);
972 } 1022 }
973 1023
974 cp->cnic_uinfo = uinfo; 1024 return ret;
975 return 0;
976} 1025}
977 1026
978static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) 1027static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
@@ -993,11 +1042,11 @@ static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
993 if (ret) 1042 if (ret)
994 goto error; 1043 goto error;
995 1044
996 ret = cnic_alloc_l2_rings(dev, 2); 1045 ret = cnic_alloc_uio_rings(dev, 2);
997 if (ret) 1046 if (ret)
998 goto error; 1047 goto error;
999 1048
1000 ret = cnic_alloc_uio(dev); 1049 ret = cnic_init_uio(dev);
1001 if (ret) 1050 if (ret)
1002 goto error; 1051 goto error;
1003 1052
@@ -1028,7 +1077,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1028 1077
1029 cp->ctx_blks = blks; 1078 cp->ctx_blks = blks;
1030 cp->ctx_blk_size = ctx_blk_size; 1079 cp->ctx_blk_size = ctx_blk_size;
1031 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) 1080 if (!BNX2X_CHIP_IS_57710(cp->chip_id))
1032 cp->ctx_align = 0; 1081 cp->ctx_align = 0;
1033 else 1082 else
1034 cp->ctx_align = ctx_blk_size; 1083 cp->ctx_align = ctx_blk_size;
@@ -1063,6 +1112,8 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1063 int i, j, n, ret, pages; 1112 int i, j, n, ret, pages;
1064 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1113 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1065 1114
1115 cp->iro_arr = ethdev->iro_arr;
1116
1066 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1117 cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1067 cp->iscsi_start_cid = start_cid; 1118 cp->iscsi_start_cid = start_cid;
1068 if (start_cid < BNX2X_ISCSI_START_CID) { 1119 if (start_cid < BNX2X_ISCSI_START_CID) {
@@ -1127,15 +1178,13 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1127 1178
1128 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1179 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1129 1180
1130 memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
1131
1132 cp->l2_rx_ring_size = 15; 1181 cp->l2_rx_ring_size = 15;
1133 1182
1134 ret = cnic_alloc_l2_rings(dev, 4); 1183 ret = cnic_alloc_uio_rings(dev, 4);
1135 if (ret) 1184 if (ret)
1136 goto error; 1185 goto error;
1137 1186
1138 ret = cnic_alloc_uio(dev); 1187 ret = cnic_init_uio(dev);
1139 if (ret) 1188 if (ret)
1140 goto error; 1189 goto error;
1141 1190
@@ -1209,9 +1258,9 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1209 1258
1210 kwqe.hdr.conn_and_cmd_data = 1259 kwqe.hdr.conn_and_cmd_data =
1211 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1260 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1212 BNX2X_HW_CID(cid, cp->func))); 1261 BNX2X_HW_CID(cp, cid)));
1213 kwqe.hdr.type = cpu_to_le16(type); 1262 kwqe.hdr.type = cpu_to_le16(type);
1214 kwqe.hdr.reserved = 0; 1263 kwqe.hdr.reserved1 = 0;
1215 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); 1264 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1216 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); 1265 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1217 1266
@@ -1246,8 +1295,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1246{ 1295{
1247 struct cnic_local *cp = dev->cnic_priv; 1296 struct cnic_local *cp = dev->cnic_priv;
1248 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; 1297 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1249 int func = cp->func, pages; 1298 int hq_bds, pages;
1250 int hq_bds; 1299 u32 pfid = cp->pfid;
1251 1300
1252 cp->num_iscsi_tasks = req1->num_tasks_per_conn; 1301 cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1253 cp->num_ccells = req1->num_ccells_per_conn; 1302 cp->num_ccells = req1->num_ccells_per_conn;
@@ -1264,60 +1313,60 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1264 return 0; 1313 return 0;
1265 1314
1266 /* init Tstorm RAM */ 1315 /* init Tstorm RAM */
1267 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func), 1316 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1268 req1->rq_num_wqes); 1317 req1->rq_num_wqes);
1269 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1318 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1270 PAGE_SIZE); 1319 PAGE_SIZE);
1271 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1320 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1272 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1321 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1273 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1322 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1274 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1323 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1275 req1->num_tasks_per_conn); 1324 req1->num_tasks_per_conn);
1276 1325
1277 /* init Ustorm RAM */ 1326 /* init Ustorm RAM */
1278 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1327 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1279 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), 1328 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1280 req1->rq_buffer_size); 1329 req1->rq_buffer_size);
1281 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1330 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1282 PAGE_SIZE); 1331 PAGE_SIZE);
1283 CNIC_WR8(dev, BAR_USTRORM_INTMEM + 1332 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1284 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1333 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1285 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1334 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1286 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1335 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1287 req1->num_tasks_per_conn); 1336 req1->num_tasks_per_conn);
1288 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func), 1337 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1289 req1->rq_num_wqes); 1338 req1->rq_num_wqes);
1290 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func), 1339 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1291 req1->cq_num_wqes); 1340 req1->cq_num_wqes);
1292 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), 1341 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1293 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1342 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1294 1343
1295 /* init Xstorm RAM */ 1344 /* init Xstorm RAM */
1296 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1345 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1297 PAGE_SIZE); 1346 PAGE_SIZE);
1298 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1347 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1299 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1348 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1300 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 1349 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1301 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1350 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1302 req1->num_tasks_per_conn); 1351 req1->num_tasks_per_conn);
1303 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func), 1352 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1304 hq_bds); 1353 hq_bds);
1305 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func), 1354 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1306 req1->num_tasks_per_conn); 1355 req1->num_tasks_per_conn);
1307 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), 1356 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1308 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1357 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1309 1358
1310 /* init Cstorm RAM */ 1359 /* init Cstorm RAM */
1311 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1360 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1312 PAGE_SIZE); 1361 PAGE_SIZE);
1313 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 1362 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1314 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1363 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1315 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1364 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1316 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1365 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1317 req1->num_tasks_per_conn); 1366 req1->num_tasks_per_conn);
1318 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func), 1367 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1319 req1->cq_num_wqes); 1368 req1->cq_num_wqes);
1320 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func), 1369 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1321 hq_bds); 1370 hq_bds);
1322 1371
1323 return 0; 1372 return 0;
@@ -1327,7 +1376,7 @@ static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1327{ 1376{
1328 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; 1377 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1329 struct cnic_local *cp = dev->cnic_priv; 1378 struct cnic_local *cp = dev->cnic_priv;
1330 int func = cp->func; 1379 u32 pfid = cp->pfid;
1331 struct iscsi_kcqe kcqe; 1380 struct iscsi_kcqe kcqe;
1332 struct kcqe *cqes[1]; 1381 struct kcqe *cqes[1];
1333 1382
@@ -1339,21 +1388,21 @@ static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1339 } 1388 }
1340 1389
1341 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1390 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1342 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); 1391 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1343 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1392 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1344 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, 1393 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1345 req2->error_bit_map[1]); 1394 req2->error_bit_map[1]);
1346 1395
1347 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1396 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1348 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); 1397 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1349 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1398 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1350 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); 1399 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1351 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1400 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1352 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, 1401 USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1353 req2->error_bit_map[1]); 1402 req2->error_bit_map[1]);
1354 1403
1355 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1404 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1356 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); 1405 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1357 1406
1358 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1407 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1359 1408
@@ -1461,7 +1510,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1461 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; 1510 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1462 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1511 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1463 u32 cid = ctx->cid; 1512 u32 cid = ctx->cid;
1464 u32 hw_cid = BNX2X_HW_CID(cid, cp->func); 1513 u32 hw_cid = BNX2X_HW_CID(cp, cid);
1465 struct iscsi_context *ictx; 1514 struct iscsi_context *ictx;
1466 struct regpair context_addr; 1515 struct regpair context_addr;
1467 int i, j, n = 2, n_max; 1516 int i, j, n = 2, n_max;
@@ -1527,8 +1576,10 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1527 ictx->tstorm_st_context.tcp.cwnd = 0x5A8; 1576 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1528 ictx->tstorm_st_context.tcp.flags2 |= 1577 ictx->tstorm_st_context.tcp.flags2 |=
1529 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; 1578 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1579 ictx->tstorm_st_context.tcp.ooo_support_mode =
1580 TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1530 1581
1531 ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; 1582 ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1532 1583
1533 ictx->ustorm_st_context.ring.rq.pbl_base.lo = 1584 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1534 req2->rq_page_table_addr_lo; 1585 req2->rq_page_table_addr_lo;
@@ -1627,10 +1678,11 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1627 struct iscsi_kwqe_conn_offload1 *req1; 1678 struct iscsi_kwqe_conn_offload1 *req1;
1628 struct iscsi_kwqe_conn_offload2 *req2; 1679 struct iscsi_kwqe_conn_offload2 *req2;
1629 struct cnic_local *cp = dev->cnic_priv; 1680 struct cnic_local *cp = dev->cnic_priv;
1681 struct cnic_context *ctx;
1630 struct iscsi_kcqe kcqe; 1682 struct iscsi_kcqe kcqe;
1631 struct kcqe *cqes[1]; 1683 struct kcqe *cqes[1];
1632 u32 l5_cid; 1684 u32 l5_cid;
1633 int ret; 1685 int ret = 0;
1634 1686
1635 if (num < 2) { 1687 if (num < 2) {
1636 *work = num; 1688 *work = num;
@@ -1654,9 +1706,15 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1654 kcqe.iscsi_conn_id = l5_cid; 1706 kcqe.iscsi_conn_id = l5_cid;
1655 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 1707 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1656 1708
1709 ctx = &cp->ctx_tbl[l5_cid];
1710 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1711 kcqe.completion_status =
1712 ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1713 goto done;
1714 }
1715
1657 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { 1716 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1658 atomic_dec(&cp->iscsi_conn); 1717 atomic_dec(&cp->iscsi_conn);
1659 ret = 0;
1660 goto done; 1718 goto done;
1661 } 1719 }
1662 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 1720 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
@@ -1673,8 +1731,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1673 } 1731 }
1674 1732
1675 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1733 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1676 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid, 1734 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
1677 cp->func);
1678 1735
1679done: 1736done:
1680 cqes[0] = (struct kcqe *) &kcqe; 1737 cqes[0] = (struct kcqe *) &kcqe;
@@ -1707,40 +1764,66 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1707 return ret; 1764 return ret;
1708} 1765}
1709 1766
1767static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1768{
1769 struct cnic_local *cp = dev->cnic_priv;
1770 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1771 union l5cm_specific_data l5_data;
1772 int ret;
1773 u32 hw_cid, type;
1774
1775 init_waitqueue_head(&ctx->waitq);
1776 ctx->wait_cond = 0;
1777 memset(&l5_data, 0, sizeof(l5_data));
1778 hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1779 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
1780 & SPE_HDR_CONN_TYPE;
1781 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1782 SPE_HDR_FUNCTION_ID);
1783
1784 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1785 hw_cid, type, &l5_data);
1786
1787 if (ret == 0)
1788 wait_event(ctx->waitq, ctx->wait_cond);
1789
1790 return ret;
1791}
1792
1710static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 1793static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1711{ 1794{
1712 struct cnic_local *cp = dev->cnic_priv; 1795 struct cnic_local *cp = dev->cnic_priv;
1713 struct iscsi_kwqe_conn_destroy *req = 1796 struct iscsi_kwqe_conn_destroy *req =
1714 (struct iscsi_kwqe_conn_destroy *) kwqe; 1797 (struct iscsi_kwqe_conn_destroy *) kwqe;
1715 union l5cm_specific_data l5_data;
1716 u32 l5_cid = req->reserved0; 1798 u32 l5_cid = req->reserved0;
1717 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1799 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1718 int ret = 0; 1800 int ret = 0;
1719 struct iscsi_kcqe kcqe; 1801 struct iscsi_kcqe kcqe;
1720 struct kcqe *cqes[1]; 1802 struct kcqe *cqes[1];
1721 1803
1722 if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) 1804 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1723 goto skip_cfc_delete; 1805 goto skip_cfc_delete;
1724 1806
1725 while (!time_after(jiffies, ctx->timestamp + (2 * HZ))) 1807 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1726 msleep(250); 1808 unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1727 1809
1728 init_waitqueue_head(&ctx->waitq); 1810 if (delta > (2 * HZ))
1729 ctx->wait_cond = 0; 1811 delta = 0;
1730 memset(&l5_data, 0, sizeof(l5_data)); 1812
1731 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 1813 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
1732 req->context_id, 1814 queue_delayed_work(cnic_wq, &cp->delete_task, delta);
1733 ETH_CONNECTION_TYPE | 1815 goto destroy_reply;
1734 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), 1816 }
1735 &l5_data); 1817
1736 if (ret == 0) 1818 ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
1737 wait_event(ctx->waitq, ctx->wait_cond);
1738 1819
1739skip_cfc_delete: 1820skip_cfc_delete:
1740 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1821 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1741 1822
1742 atomic_dec(&cp->iscsi_conn); 1823 atomic_dec(&cp->iscsi_conn);
1824 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
1743 1825
1826destroy_reply:
1744 memset(&kcqe, 0, sizeof(kcqe)); 1827 memset(&kcqe, 0, sizeof(kcqe));
1745 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; 1828 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
1746 kcqe.iscsi_conn_id = l5_cid; 1829 kcqe.iscsi_conn_id = l5_cid;
@@ -1805,37 +1888,37 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
1805static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 1888static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
1806{ 1889{
1807 struct cnic_local *cp = dev->cnic_priv; 1890 struct cnic_local *cp = dev->cnic_priv;
1808 int func = CNIC_FUNC(cp); 1891 u32 pfid = cp->pfid;
1809 u8 *mac = dev->mac_addr; 1892 u8 *mac = dev->mac_addr;
1810 1893
1811 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1894 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1812 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]); 1895 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
1813 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1896 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1814 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]); 1897 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
1815 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1898 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1816 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]); 1899 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
1817 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1900 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1818 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]); 1901 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
1819 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1902 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1820 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]); 1903 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
1821 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1904 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1822 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]); 1905 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
1823 1906
1824 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1907 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1825 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]); 1908 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
1826 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1909 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1827 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, 1910 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
1828 mac[4]); 1911 mac[4]);
1829 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1912 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1830 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]); 1913 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
1831 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1914 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1832 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, 1915 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
1833 mac[2]); 1916 mac[2]);
1834 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1917 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1835 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2, 1918 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
1836 mac[1]); 1919 mac[1]);
1837 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1920 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1838 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3, 1921 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
1839 mac[0]); 1922 mac[0]);
1840} 1923}
1841 1924
@@ -1851,10 +1934,10 @@ static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
1851 } 1934 }
1852 1935
1853 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1936 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1854 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags); 1937 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
1855 1938
1856 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1939 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1857 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags); 1940 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
1858} 1941}
1859 1942
1860static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 1943static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
@@ -1929,7 +2012,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
1929 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2012 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
1930 2013
1931 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2014 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1932 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id); 2015 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
1933 2016
1934 cnic_bnx2x_set_tcp_timestamp(dev, 2017 cnic_bnx2x_set_tcp_timestamp(dev,
1935 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 2018 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
@@ -1937,7 +2020,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
1937 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2020 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
1938 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2021 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1939 if (!ret) 2022 if (!ret)
1940 ctx->ctx_flags |= CTX_FL_OFFLD_START; 2023 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
1941 2024
1942 return ret; 2025 return ret;
1943} 2026}
@@ -2063,7 +2146,7 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2063static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2146static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2064{ 2147{
2065 struct cnic_local *cp = dev->cnic_priv; 2148 struct cnic_local *cp = dev->cnic_priv;
2066 int i, j; 2149 int i, j, comp = 0;
2067 2150
2068 i = 0; 2151 i = 0;
2069 j = 1; 2152 j = 1;
@@ -2074,7 +2157,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2074 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; 2157 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
2075 2158
2076 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2159 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2077 cnic_kwq_completion(dev, 1); 2160 comp++;
2078 2161
2079 while (j < num_cqes) { 2162 while (j < num_cqes) {
2080 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2163 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
@@ -2083,7 +2166,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2083 break; 2166 break;
2084 2167
2085 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2168 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2086 cnic_kwq_completion(dev, 1); 2169 comp++;
2087 j++; 2170 j++;
2088 } 2171 }
2089 2172
@@ -2113,6 +2196,8 @@ end:
2113 i += j; 2196 i += j;
2114 j = 1; 2197 j = 1;
2115 } 2198 }
2199 if (unlikely(comp))
2200 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2116} 2201}
2117 2202
2118static u16 cnic_bnx2_next_idx(u16 idx) 2203static u16 cnic_bnx2_next_idx(u16 idx)
@@ -2171,8 +2256,9 @@ static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2171static int cnic_l2_completion(struct cnic_local *cp) 2256static int cnic_l2_completion(struct cnic_local *cp)
2172{ 2257{
2173 u16 hw_cons, sw_cons; 2258 u16 hw_cons, sw_cons;
2259 struct cnic_uio_dev *udev = cp->udev;
2174 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2260 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2175 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 2261 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2176 u32 cmd; 2262 u32 cmd;
2177 int comp = 0; 2263 int comp = 0;
2178 2264
@@ -2203,13 +2289,14 @@ static int cnic_l2_completion(struct cnic_local *cp)
2203 2289
2204static void cnic_chk_pkt_rings(struct cnic_local *cp) 2290static void cnic_chk_pkt_rings(struct cnic_local *cp)
2205{ 2291{
2206 u16 rx_cons = *cp->rx_cons_ptr; 2292 u16 rx_cons, tx_cons;
2207 u16 tx_cons = *cp->tx_cons_ptr;
2208 int comp = 0; 2293 int comp = 0;
2209 2294
2210 if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags)) 2295 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2211 return; 2296 return;
2212 2297
2298 rx_cons = *cp->rx_cons_ptr;
2299 tx_cons = *cp->tx_cons_ptr;
2213 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2300 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2214 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2301 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2215 comp = cnic_l2_completion(cp); 2302 comp = cnic_l2_completion(cp);
@@ -2217,7 +2304,8 @@ static void cnic_chk_pkt_rings(struct cnic_local *cp)
2217 cp->tx_cons = tx_cons; 2304 cp->tx_cons = tx_cons;
2218 cp->rx_cons = rx_cons; 2305 cp->rx_cons = rx_cons;
2219 2306
2220 uio_event_notify(cp->cnic_uinfo); 2307 if (cp->udev)
2308 uio_event_notify(&cp->udev->cnic_uinfo);
2221 } 2309 }
2222 if (comp) 2310 if (comp)
2223 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2311 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
@@ -2318,14 +2406,38 @@ static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2318 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 2406 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2319} 2407}
2320 2408
2409static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2410 u16 index, u8 op, u8 update)
2411{
2412 struct igu_regular cmd_data;
2413 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2414
2415 cmd_data.sb_id_and_flags =
2416 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
2417 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2418 (update << IGU_REGULAR_BUPDATE_SHIFT) |
2419 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
2420
2421
2422 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2423}
2424
2321static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 2425static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2322{ 2426{
2323 struct cnic_local *cp = dev->cnic_priv; 2427 struct cnic_local *cp = dev->cnic_priv;
2324 2428
2325 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, 2429 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2326 IGU_INT_DISABLE, 0); 2430 IGU_INT_DISABLE, 0);
2327} 2431}
2328 2432
2433static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2434{
2435 struct cnic_local *cp = dev->cnic_priv;
2436
2437 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2438 IGU_INT_DISABLE, 0);
2439}
2440
2329static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 2441static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2330{ 2442{
2331 u32 last_status = *info->status_idx_ptr; 2443 u32 last_status = *info->status_idx_ptr;
@@ -2357,8 +2469,12 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2357 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2469 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2358 2470
2359 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2471 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2360 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 2472 if (BNX2X_CHIP_IS_E2(cp->chip_id))
2361 status_idx, IGU_INT_ENABLE, 1); 2473 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2474 status_idx, IGU_INT_ENABLE, 1);
2475 else
2476 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2477 status_idx, IGU_INT_ENABLE, 1);
2362} 2478}
2363 2479
2364static int cnic_service_bnx2x(void *data, void *status_blk) 2480static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -2379,8 +2495,7 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
2379 struct cnic_local *cp = dev->cnic_priv; 2495 struct cnic_local *cp = dev->cnic_priv;
2380 int if_type; 2496 int if_type;
2381 2497
2382 if (cp->cnic_uinfo) 2498 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2383 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2384 2499
2385 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2500 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
2386 struct cnic_ulp_ops *ulp_ops; 2501 struct cnic_ulp_ops *ulp_ops;
@@ -2728,6 +2843,13 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
2728 if (l5_cid >= MAX_CM_SK_TBL_SZ) 2843 if (l5_cid >= MAX_CM_SK_TBL_SZ)
2729 return -EINVAL; 2844 return -EINVAL;
2730 2845
2846 if (cp->ctx_tbl) {
2847 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2848
2849 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2850 return -EAGAIN;
2851 }
2852
2731 csk1 = &cp->csk_tbl[l5_cid]; 2853 csk1 = &cp->csk_tbl[l5_cid];
2732 if (atomic_read(&csk1->ref_count)) 2854 if (atomic_read(&csk1->ref_count))
2733 return -EAGAIN; 2855 return -EAGAIN;
@@ -3279,39 +3401,106 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3279 3401
3280static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 3402static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3281{ 3403{
3404 struct cnic_local *cp = dev->cnic_priv;
3405 int i;
3406
3407 if (!cp->ctx_tbl)
3408 return;
3409
3410 if (!netif_running(dev->netdev))
3411 return;
3412
3413 for (i = 0; i < cp->max_cid_space; i++) {
3414 struct cnic_context *ctx = &cp->ctx_tbl[i];
3415
3416 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3417 msleep(10);
3418
3419 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3420 netdev_warn(dev->netdev, "CID %x not deleted\n",
3421 ctx->cid);
3422 }
3423
3424 cancel_delayed_work(&cp->delete_task);
3425 flush_workqueue(cnic_wq);
3426
3427 if (atomic_read(&cp->iscsi_conn) != 0)
3428 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3429 atomic_read(&cp->iscsi_conn));
3282} 3430}
3283 3431
3284static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 3432static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3285{ 3433{
3286 struct cnic_local *cp = dev->cnic_priv; 3434 struct cnic_local *cp = dev->cnic_priv;
3287 int func = CNIC_FUNC(cp); 3435 u32 pfid = cp->pfid;
3436 u32 port = CNIC_PORT(cp);
3288 3437
3289 cnic_init_bnx2x_mac(dev); 3438 cnic_init_bnx2x_mac(dev);
3290 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3439 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3291 3440
3292 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 3441 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3293 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0); 3442 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3294 3443
3295 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3444 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3296 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1); 3445 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3297 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3446 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3298 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), 3447 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3299 DEF_MAX_DA_COUNT); 3448 DEF_MAX_DA_COUNT);
3300 3449
3301 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3450 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3302 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL); 3451 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3303 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3452 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3304 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS); 3453 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3305 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3454 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3306 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2); 3455 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
3307 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3456 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3308 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER); 3457 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
3309 3458
3310 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func), 3459 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
3311 DEF_MAX_CWND); 3460 DEF_MAX_CWND);
3312 return 0; 3461 return 0;
3313} 3462}
3314 3463
3464static void cnic_delete_task(struct work_struct *work)
3465{
3466 struct cnic_local *cp;
3467 struct cnic_dev *dev;
3468 u32 i;
3469 int need_resched = 0;
3470
3471 cp = container_of(work, struct cnic_local, delete_task.work);
3472 dev = cp->dev;
3473
3474 for (i = 0; i < cp->max_cid_space; i++) {
3475 struct cnic_context *ctx = &cp->ctx_tbl[i];
3476
3477 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
3478 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3479 continue;
3480
3481 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
3482 need_resched = 1;
3483 continue;
3484 }
3485
3486 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3487 continue;
3488
3489 cnic_bnx2x_destroy_ramrod(dev, i);
3490
3491 cnic_free_bnx2x_conn_resc(dev, i);
3492 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
3493 atomic_dec(&cp->iscsi_conn);
3494
3495 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
3496 }
3497
3498 if (need_resched)
3499 queue_delayed_work(cnic_wq, &cp->delete_task,
3500 msecs_to_jiffies(10));
3501
3502}
3503
3315static int cnic_cm_open(struct cnic_dev *dev) 3504static int cnic_cm_open(struct cnic_dev *dev)
3316{ 3505{
3317 struct cnic_local *cp = dev->cnic_priv; 3506 struct cnic_local *cp = dev->cnic_priv;
@@ -3326,6 +3515,8 @@ static int cnic_cm_open(struct cnic_dev *dev)
3326 if (err) 3515 if (err)
3327 goto err_out; 3516 goto err_out;
3328 3517
3518 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
3519
3329 dev->cm_create = cnic_cm_create; 3520 dev->cm_create = cnic_cm_create;
3330 dev->cm_destroy = cnic_cm_destroy; 3521 dev->cm_destroy = cnic_cm_destroy;
3331 dev->cm_connect = cnic_cm_connect; 3522 dev->cm_connect = cnic_cm_connect;
@@ -3418,11 +3609,24 @@ static void cnic_free_irq(struct cnic_dev *dev)
3418 3609
3419 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3610 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3420 cp->disable_int_sync(dev); 3611 cp->disable_int_sync(dev);
3421 tasklet_disable(&cp->cnic_irq_task); 3612 tasklet_kill(&cp->cnic_irq_task);
3422 free_irq(ethdev->irq_arr[0].vector, dev); 3613 free_irq(ethdev->irq_arr[0].vector, dev);
3423 } 3614 }
3424} 3615}
3425 3616
3617static int cnic_request_irq(struct cnic_dev *dev)
3618{
3619 struct cnic_local *cp = dev->cnic_priv;
3620 struct cnic_eth_dev *ethdev = cp->ethdev;
3621 int err;
3622
3623 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
3624 if (err)
3625 tasklet_disable(&cp->cnic_irq_task);
3626
3627 return err;
3628}
3629
3426static int cnic_init_bnx2_irq(struct cnic_dev *dev) 3630static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3427{ 3631{
3428 struct cnic_local *cp = dev->cnic_priv; 3632 struct cnic_local *cp = dev->cnic_priv;
@@ -3443,12 +3647,10 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
3443 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 3647 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
3444 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 3648 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
3445 (unsigned long) dev); 3649 (unsigned long) dev);
3446 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3650 err = cnic_request_irq(dev);
3447 "cnic", dev); 3651 if (err)
3448 if (err) {
3449 tasklet_disable(&cp->cnic_irq_task);
3450 return err; 3652 return err;
3451 } 3653
3452 while (cp->status_blk.bnx2->status_completion_producer_index && 3654 while (cp->status_blk.bnx2->status_completion_producer_index &&
3453 i < 10) { 3655 i < 10) {
3454 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 3656 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
@@ -3515,11 +3717,12 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3515{ 3717{
3516 struct cnic_local *cp = dev->cnic_priv; 3718 struct cnic_local *cp = dev->cnic_priv;
3517 struct cnic_eth_dev *ethdev = cp->ethdev; 3719 struct cnic_eth_dev *ethdev = cp->ethdev;
3720 struct cnic_uio_dev *udev = cp->udev;
3518 u32 cid_addr, tx_cid, sb_id; 3721 u32 cid_addr, tx_cid, sb_id;
3519 u32 val, offset0, offset1, offset2, offset3; 3722 u32 val, offset0, offset1, offset2, offset3;
3520 int i; 3723 int i;
3521 struct tx_bd *txbd; 3724 struct tx_bd *txbd;
3522 dma_addr_t buf_map; 3725 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
3523 struct status_block *s_blk = cp->status_blk.gen; 3726 struct status_block *s_blk = cp->status_blk.gen;
3524 3727
3525 sb_id = cp->status_blk_num; 3728 sb_id = cp->status_blk_num;
@@ -3561,18 +3764,18 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
3561 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3764 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3562 cnic_ctx_wr(dev, cid_addr, offset1, val); 3765 cnic_ctx_wr(dev, cid_addr, offset1, val);
3563 3766
3564 txbd = (struct tx_bd *) cp->l2_ring; 3767 txbd = (struct tx_bd *) udev->l2_ring;
3565 3768
3566 buf_map = cp->l2_buf_map; 3769 buf_map = udev->l2_buf_map;
3567 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 3770 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
3568 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 3771 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
3569 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3772 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3570 } 3773 }
3571 val = (u64) cp->l2_ring_map >> 32; 3774 val = (u64) ring_map >> 32;
3572 cnic_ctx_wr(dev, cid_addr, offset2, val); 3775 cnic_ctx_wr(dev, cid_addr, offset2, val);
3573 txbd->tx_bd_haddr_hi = val; 3776 txbd->tx_bd_haddr_hi = val;
3574 3777
3575 val = (u64) cp->l2_ring_map & 0xffffffff; 3778 val = (u64) ring_map & 0xffffffff;
3576 cnic_ctx_wr(dev, cid_addr, offset3, val); 3779 cnic_ctx_wr(dev, cid_addr, offset3, val);
3577 txbd->tx_bd_haddr_lo = val; 3780 txbd->tx_bd_haddr_lo = val;
3578} 3781}
@@ -3581,10 +3784,12 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3581{ 3784{
3582 struct cnic_local *cp = dev->cnic_priv; 3785 struct cnic_local *cp = dev->cnic_priv;
3583 struct cnic_eth_dev *ethdev = cp->ethdev; 3786 struct cnic_eth_dev *ethdev = cp->ethdev;
3787 struct cnic_uio_dev *udev = cp->udev;
3584 u32 cid_addr, sb_id, val, coal_reg, coal_val; 3788 u32 cid_addr, sb_id, val, coal_reg, coal_val;
3585 int i; 3789 int i;
3586 struct rx_bd *rxbd; 3790 struct rx_bd *rxbd;
3587 struct status_block *s_blk = cp->status_blk.gen; 3791 struct status_block *s_blk = cp->status_blk.gen;
3792 dma_addr_t ring_map = udev->l2_ring_map;
3588 3793
3589 sb_id = cp->status_blk_num; 3794 sb_id = cp->status_blk_num;
3590 cnic_init_context(dev, 2); 3795 cnic_init_context(dev, 2);
@@ -3618,22 +3823,22 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
3618 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 3823 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
3619 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 3824 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
3620 3825
3621 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); 3826 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
3622 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 3827 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
3623 dma_addr_t buf_map; 3828 dma_addr_t buf_map;
3624 int n = (i % cp->l2_rx_ring_size) + 1; 3829 int n = (i % cp->l2_rx_ring_size) + 1;
3625 3830
3626 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); 3831 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
3627 rxbd->rx_bd_len = cp->l2_single_buf_size; 3832 rxbd->rx_bd_len = cp->l2_single_buf_size;
3628 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 3833 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3629 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 3834 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
3630 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3835 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
3631 } 3836 }
3632 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 3837 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
3633 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 3838 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
3634 rxbd->rx_bd_haddr_hi = val; 3839 rxbd->rx_bd_haddr_hi = val;
3635 3840
3636 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; 3841 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3637 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 3842 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
3638 rxbd->rx_bd_haddr_lo = val; 3843 rxbd->rx_bd_haddr_lo = val;
3639 3844
@@ -3850,42 +4055,55 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
3850 4055
3851 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, 4056 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
3852 (unsigned long) dev); 4057 (unsigned long) dev);
3853 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4058 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
3854 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 4059 err = cnic_request_irq(dev);
3855 "cnic", dev); 4060
3856 if (err)
3857 tasklet_disable(&cp->cnic_irq_task);
3858 }
3859 return err; 4061 return err;
3860} 4062}
3861 4063
4064static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4065 u16 sb_id, u8 sb_index,
4066 u8 disable)
4067{
4068
4069 u32 addr = BAR_CSTRORM_INTMEM +
4070 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4071 offsetof(struct hc_status_block_data_e1x, index_data) +
4072 sizeof(struct hc_index_data)*sb_index +
4073 offsetof(struct hc_index_data, flags);
4074 u16 flags = CNIC_RD16(dev, addr);
4075 /* clear and set */
4076 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4077 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4078 HC_INDEX_DATA_HC_ENABLED);
4079 CNIC_WR16(dev, addr, flags);
4080}
4081
3862static void cnic_enable_bnx2x_int(struct cnic_dev *dev) 4082static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
3863{ 4083{
3864 struct cnic_local *cp = dev->cnic_priv; 4084 struct cnic_local *cp = dev->cnic_priv;
3865 u8 sb_id = cp->status_blk_num; 4085 u8 sb_id = cp->status_blk_num;
3866 int port = CNIC_PORT(cp);
3867 4086
3868 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4087 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
3869 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, 4088 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
3870 HC_INDEX_C_ISCSI_EQ_CONS), 4089 offsetof(struct hc_status_block_data_e1x, index_data) +
3871 64 / 12); 4090 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
3872 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4091 offsetof(struct hc_index_data, timeout), 64 / 12);
3873 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, 4092 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
3874 HC_INDEX_C_ISCSI_EQ_CONS), 0);
3875} 4093}
3876 4094
3877static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) 4095static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
3878{ 4096{
3879} 4097}
3880 4098
3881static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) 4099static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4100 struct client_init_ramrod_data *data)
3882{ 4101{
3883 struct cnic_local *cp = dev->cnic_priv; 4102 struct cnic_local *cp = dev->cnic_priv;
3884 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; 4103 struct cnic_uio_dev *udev = cp->udev;
3885 struct eth_context *context; 4104 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
3886 struct regpair context_addr; 4105 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
3887 dma_addr_t buf_map; 4106 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
3888 int func = CNIC_FUNC(cp);
3889 int port = CNIC_PORT(cp); 4107 int port = CNIC_PORT(cp);
3890 int i; 4108 int i;
3891 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4109 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
@@ -3893,7 +4111,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
3893 4111
3894 memset(txbd, 0, BCM_PAGE_SIZE); 4112 memset(txbd, 0, BCM_PAGE_SIZE);
3895 4113
3896 buf_map = cp->l2_buf_map; 4114 buf_map = udev->l2_buf_map;
3897 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4115 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
3898 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4116 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
3899 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4117 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
@@ -3910,33 +4128,23 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
3910 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4128 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
3911 4129
3912 } 4130 }
3913 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);
3914 4131
3915 val = (u64) cp->l2_ring_map >> 32; 4132 val = (u64) ring_map >> 32;
3916 txbd->next_bd.addr_hi = cpu_to_le32(val); 4133 txbd->next_bd.addr_hi = cpu_to_le32(val);
3917 4134
3918 context->xstorm_st_context.tx_bd_page_base_hi = val; 4135 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
3919 4136
3920 val = (u64) cp->l2_ring_map & 0xffffffff; 4137 val = (u64) ring_map & 0xffffffff;
3921 txbd->next_bd.addr_lo = cpu_to_le32(val); 4138 txbd->next_bd.addr_lo = cpu_to_le32(val);
3922 4139
3923 context->xstorm_st_context.tx_bd_page_base_lo = val; 4140 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
3924
3925 context->cstorm_st_context.sb_index_number =
3926 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
3927 context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;
3928 4141
3929 if (cli < MAX_X_STAT_COUNTER_ID) 4142 /* Other ramrod params */
3930 context->xstorm_st_context.statistics_data = cli | 4143 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
3931 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE; 4144 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
3932
3933 context->xstorm_ag_context.cdu_reserved =
3934 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
3935 CDU_REGION_NUMBER_XCM_AG,
3936 ETH_CONNECTION_TYPE);
3937 4145
3938 /* reset xstorm per client statistics */ 4146 /* reset xstorm per client statistics */
3939 if (cli < MAX_X_STAT_COUNTER_ID) { 4147 if (cli < MAX_STAT_COUNTER_ID) {
3940 val = BAR_XSTRORM_INTMEM + 4148 val = BAR_XSTRORM_INTMEM +
3941 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4149 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
3942 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) 4150 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
@@ -3944,111 +4152,77 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
3944 } 4152 }
3945 4153
3946 cp->tx_cons_ptr = 4154 cp->tx_cons_ptr =
3947 &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ 4155 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
3948 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
3949} 4156}
3950 4157
3951static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) 4158static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4159 struct client_init_ramrod_data *data)
3952{ 4160{
3953 struct cnic_local *cp = dev->cnic_priv; 4161 struct cnic_local *cp = dev->cnic_priv;
3954 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + 4162 struct cnic_uio_dev *udev = cp->udev;
4163 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
3955 BCM_PAGE_SIZE); 4164 BCM_PAGE_SIZE);
3956 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4165 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
3957 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 4166 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
3958 struct eth_context *context; 4167 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
3959 struct regpair context_addr;
3960 int i; 4168 int i;
3961 int port = CNIC_PORT(cp); 4169 int port = CNIC_PORT(cp);
3962 int func = CNIC_FUNC(cp);
3963 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4170 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4171 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
3964 u32 val; 4172 u32 val;
3965 struct tstorm_eth_client_config tstorm_client = {0}; 4173 dma_addr_t ring_map = udev->l2_ring_map;
4174
4175 /* General data */
4176 data->general.client_id = cli;
4177 data->general.statistics_en_flg = 1;
4178 data->general.statistics_counter_id = cli;
4179 data->general.activate_flg = 1;
4180 data->general.sp_client_id = cli;
3966 4181
3967 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4182 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
3968 dma_addr_t buf_map; 4183 dma_addr_t buf_map;
3969 int n = (i % cp->l2_rx_ring_size) + 1; 4184 int n = (i % cp->l2_rx_ring_size) + 1;
3970 4185
3971 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); 4186 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
3972 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4187 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
3973 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4188 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
3974 } 4189 }
3975 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);
3976 4190
3977 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 4191 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
3978 rxbd->addr_hi = cpu_to_le32(val); 4192 rxbd->addr_hi = cpu_to_le32(val);
4193 data->rx.bd_page_base.hi = cpu_to_le32(val);
3979 4194
3980 context->ustorm_st_context.common.bd_page_base_hi = val; 4195 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3981
3982 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
3983 rxbd->addr_lo = cpu_to_le32(val); 4196 rxbd->addr_lo = cpu_to_le32(val);
3984 4197 data->rx.bd_page_base.lo = cpu_to_le32(val);
3985 context->ustorm_st_context.common.bd_page_base_lo = val;
3986
3987 context->ustorm_st_context.common.sb_index_numbers =
3988 BNX2X_ISCSI_RX_SB_INDEX_NUM;
3989 context->ustorm_st_context.common.clientId = cli;
3990 context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
3991 if (cli < MAX_U_STAT_COUNTER_ID) {
3992 context->ustorm_st_context.common.flags =
3993 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
3994 context->ustorm_st_context.common.statistics_counter_id = cli;
3995 }
3996 context->ustorm_st_context.common.mc_alignment_log_size = 0;
3997 context->ustorm_st_context.common.bd_buff_size =
3998 cp->l2_single_buf_size;
3999
4000 context->ustorm_ag_context.cdu_usage =
4001 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
4002 CDU_REGION_NUMBER_UCM_AG,
4003 ETH_CONNECTION_TYPE);
4004 4198
4005 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4199 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4006 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4200 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4007 rxcqe->addr_hi = cpu_to_le32(val); 4201 rxcqe->addr_hi = cpu_to_le32(val);
4202 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4008 4203
4009 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4204 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4010 USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);
4011
4012 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4013 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);
4014
4015 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4016 rxcqe->addr_lo = cpu_to_le32(val); 4205 rxcqe->addr_lo = cpu_to_le32(val);
4206 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4017 4207
4018 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4208 /* Other ramrod params */
4019 USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); 4209 data->rx.client_qzone_id = cl_qzone_id;
4020 4210 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4021 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4211 data->rx.status_block_id = BNX2X_DEF_SB_ID;
4022 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);
4023
4024 /* client tstorm info */
4025 tstorm_client.mtu = cp->l2_single_buf_size - 14;
4026 tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE;
4027
4028 if (cli < MAX_T_STAT_COUNTER_ID) {
4029 tstorm_client.config_flags |=
4030 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4031 tstorm_client.statistics_counter_id = cli;
4032 }
4033 4212
4034 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4213 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4035 TSTORM_CLIENT_CONFIG_OFFSET(port, cli), 4214 data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
4036 ((u32 *)&tstorm_client)[0]);
4037 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4038 TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
4039 ((u32 *)&tstorm_client)[1]);
4040 4215
4041 /* reset tstorm per client statistics */ 4216 data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4042 if (cli < MAX_T_STAT_COUNTER_ID) { 4217 data->rx.outer_vlan_removal_enable_flg = 1;
4043 4218
4219 /* reset tstorm and ustorm per client statistics */
4220 if (cli < MAX_STAT_COUNTER_ID) {
4044 val = BAR_TSTRORM_INTMEM + 4221 val = BAR_TSTRORM_INTMEM +
4045 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4222 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4046 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) 4223 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
4047 CNIC_WR(dev, val + i * 4, 0); 4224 CNIC_WR(dev, val + i * 4, 0);
4048 }
4049 4225
4050 /* reset ustorm per client statistics */
4051 if (cli < MAX_U_STAT_COUNTER_ID) {
4052 val = BAR_USTRORM_INTMEM + 4226 val = BAR_USTRORM_INTMEM +
4053 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4227 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4054 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) 4228 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
@@ -4056,21 +4230,22 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
4056 } 4230 }
4057 4231
4058 cp->rx_cons_ptr = 4232 cp->rx_cons_ptr =
4059 &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ 4233 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4060 HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
4061} 4234}
4062 4235
4063static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) 4236static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4064{ 4237{
4065 struct cnic_local *cp = dev->cnic_priv; 4238 struct cnic_local *cp = dev->cnic_priv;
4066 u32 base, addr, val; 4239 u32 base, base2, addr, val;
4067 int port = CNIC_PORT(cp); 4240 int port = CNIC_PORT(cp);
4068 4241
4069 dev->max_iscsi_conn = 0; 4242 dev->max_iscsi_conn = 0;
4070 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); 4243 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
4071 if (base < 0xa0000 || base >= 0xc0000) 4244 if (base == 0)
4072 return; 4245 return;
4073 4246
4247 base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
4248 MISC_REG_GENERIC_CR_0));
4074 addr = BNX2X_SHMEM_ADDR(base, 4249 addr = BNX2X_SHMEM_ADDR(base,
4075 dev_info.port_hw_config[port].iscsi_mac_upper); 4250 dev_info.port_hw_config[port].iscsi_mac_upper);
4076 4251
@@ -4103,16 +4278,25 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4103 val16 ^= 0x1e1e; 4278 val16 ^= 0x1e1e;
4104 dev->max_iscsi_conn = val16; 4279 dev->max_iscsi_conn = val16;
4105 } 4280 }
4106 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { 4281 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4107 int func = CNIC_FUNC(cp); 4282 int func = CNIC_FUNC(cp);
4283 u32 mf_cfg_addr;
4284
4285 if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
4286 mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
4287 mf_cfg_addr));
4288 else
4289 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4290
4291 addr = mf_cfg_addr +
4292 offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
4108 4293
4109 addr = BNX2X_SHMEM_ADDR(base,
4110 mf_cfg.func_mf_config[func].e1hov_tag);
4111 val = CNIC_RD(dev, addr); 4294 val = CNIC_RD(dev, addr);
4112 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 4295 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4113 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 4296 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4114 addr = BNX2X_SHMEM_ADDR(base, 4297 addr = mf_cfg_addr +
4115 mf_cfg.func_mf_config[func].config); 4298 offsetof(struct mf_cfg,
4299 func_mf_config[func].config);
4116 val = CNIC_RD(dev, addr); 4300 val = CNIC_RD(dev, addr);
4117 val &= FUNC_MF_CFG_PROTOCOL_MASK; 4301 val &= FUNC_MF_CFG_PROTOCOL_MASK;
4118 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) 4302 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
@@ -4124,10 +4308,26 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4124static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4308static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4125{ 4309{
4126 struct cnic_local *cp = dev->cnic_priv; 4310 struct cnic_local *cp = dev->cnic_priv;
4311 struct cnic_eth_dev *ethdev = cp->ethdev;
4127 int func = CNIC_FUNC(cp), ret, i; 4312 int func = CNIC_FUNC(cp), ret, i;
4128 int port = CNIC_PORT(cp); 4313 u32 pfid;
4129 u16 eq_idx; 4314
4130 u8 sb_id = cp->status_blk_num; 4315 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4316 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4317
4318 if (!(val & 1))
4319 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4320 else
4321 val = (val >> 1) & 1;
4322
4323 if (val)
4324 cp->pfid = func >> 1;
4325 else
4326 cp->pfid = func & 0x6;
4327 } else {
4328 cp->pfid = func;
4329 }
4330 pfid = cp->pfid;
4131 4331
4132 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 4332 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4133 cp->iscsi_start_cid); 4333 cp->iscsi_start_cid);
@@ -4135,86 +4335,98 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4135 if (ret) 4335 if (ret)
4136 return -ENOMEM; 4336 return -ENOMEM;
4137 4337
4338 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4339
4138 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4340 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4139 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); 4341 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4140 cp->kcq1.sw_prod_idx = 0; 4342 cp->kcq1.sw_prod_idx = 0;
4141 4343
4142 cp->kcq1.hw_prod_idx_ptr = 4344 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4143 &cp->status_blk.bnx2x->c_status_block.index_values[ 4345 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4144 HC_INDEX_C_ISCSI_EQ_CONS]; 4346
4145 cp->kcq1.status_idx_ptr = 4347 cp->kcq1.hw_prod_idx_ptr =
4146 &cp->status_blk.bnx2x->c_status_block.status_block_index; 4348 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4349 cp->kcq1.status_idx_ptr =
4350 &sb->sb.running_index[SM_RX_ID];
4351 } else {
4352 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4353
4354 cp->kcq1.hw_prod_idx_ptr =
4355 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4356 cp->kcq1.status_idx_ptr =
4357 &sb->sb.running_index[SM_RX_ID];
4358 }
4147 4359
4148 cnic_get_bnx2x_iscsi_info(dev); 4360 cnic_get_bnx2x_iscsi_info(dev);
4149 4361
4150 /* Only 1 EQ */ 4362 /* Only 1 EQ */
4151 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4363 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4152 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4364 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4153 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); 4365 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
4154 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4366 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4155 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), 4367 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
4156 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); 4368 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4157 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4369 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4158 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, 4370 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
4159 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 4371 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4160 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4372 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4161 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), 4373 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
4162 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 4374 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4163 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4375 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4164 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, 4376 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
4165 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 4377 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4166 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4378 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4167 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); 4379 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
4168 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4380 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4169 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num); 4381 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4170 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4382 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4171 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0), 4383 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4172 HC_INDEX_C_ISCSI_EQ_CONS); 4384 HC_INDEX_ISCSI_EQ_CONS);
4173 4385
4174 for (i = 0; i < cp->conn_buf_info.num_pages; i++) { 4386 for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
4175 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4387 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4176 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i), 4388 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
4177 cp->conn_buf_info.pgtbl[2 * i]); 4389 cp->conn_buf_info.pgtbl[2 * i]);
4178 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4390 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4179 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4, 4391 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
4180 cp->conn_buf_info.pgtbl[(2 * i) + 1]); 4392 cp->conn_buf_info.pgtbl[(2 * i) + 1]);
4181 } 4393 }
4182 4394
4183 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4395 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4184 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), 4396 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
4185 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 4397 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4186 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4398 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4187 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4, 4399 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4188 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 4400 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4189 4401
4402 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4403 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4404
4190 cnic_setup_bnx2x_context(dev); 4405 cnic_setup_bnx2x_context(dev);
4191 4406
4192 eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
4193 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4194 offsetof(struct cstorm_status_block_c,
4195 index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
4196 if (eq_idx != 0) {
4197 netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
4198 return -EBUSY;
4199 }
4200 ret = cnic_init_bnx2x_irq(dev); 4407 ret = cnic_init_bnx2x_irq(dev);
4201 if (ret) 4408 if (ret)
4202 return ret; 4409 return ret;
4203 4410
4204 cnic_init_bnx2x_tx_ring(dev);
4205 cnic_init_bnx2x_rx_ring(dev);
4206
4207 return 0; 4411 return 0;
4208} 4412}
4209 4413
4210static void cnic_init_rings(struct cnic_dev *dev) 4414static void cnic_init_rings(struct cnic_dev *dev)
4211{ 4415{
4416 struct cnic_local *cp = dev->cnic_priv;
4417 struct cnic_uio_dev *udev = cp->udev;
4418
4419 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4420 return;
4421
4212 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4422 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4213 cnic_init_bnx2_tx_ring(dev); 4423 cnic_init_bnx2_tx_ring(dev);
4214 cnic_init_bnx2_rx_ring(dev); 4424 cnic_init_bnx2_rx_ring(dev);
4425 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4215 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4426 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4216 struct cnic_local *cp = dev->cnic_priv;
4217 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4427 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4428 u32 cl_qzone_id, type;
4429 struct client_init_ramrod_data *data;
4218 union l5cm_specific_data l5_data; 4430 union l5cm_specific_data l5_data;
4219 struct ustorm_eth_rx_producers rx_prods = {0}; 4431 struct ustorm_eth_rx_producers rx_prods = {0};
4220 u32 off, i; 4432 u32 off, i;
@@ -4223,21 +4435,38 @@ static void cnic_init_rings(struct cnic_dev *dev)
4223 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 4435 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4224 barrier(); 4436 barrier();
4225 4437
4438 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4439
4226 off = BAR_USTRORM_INTMEM + 4440 off = BAR_USTRORM_INTMEM +
4227 USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); 4441 (BNX2X_CHIP_IS_E2(cp->chip_id) ?
4442 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
4443 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
4228 4444
4229 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4445 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4230 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4446 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4231 4447
4232 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4448 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4233 4449
4234 cnic_init_bnx2x_tx_ring(dev); 4450 data = udev->l2_buf;
4235 cnic_init_bnx2x_rx_ring(dev); 4451
4452 memset(data, 0, sizeof(*data));
4453
4454 cnic_init_bnx2x_tx_ring(dev, data);
4455 cnic_init_bnx2x_rx_ring(dev, data);
4456
4457 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
4458 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
4459
4460 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4461 & SPE_HDR_CONN_TYPE;
4462 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4463 SPE_HDR_FUNCTION_ID);
4464
4465 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4236 4466
4237 l5_data.phy_address.lo = cli;
4238 l5_data.phy_address.hi = 0;
4239 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4467 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4240 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4468 BNX2X_ISCSI_L2_CID, type, &l5_data);
4469
4241 i = 0; 4470 i = 0;
4242 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4471 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4243 ++i < 10) 4472 ++i < 10)
@@ -4246,13 +4475,18 @@ static void cnic_init_rings(struct cnic_dev *dev)
4246 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4475 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4247 netdev_err(dev->netdev, 4476 netdev_err(dev->netdev,
4248 "iSCSI CLIENT_SETUP did not complete\n"); 4477 "iSCSI CLIENT_SETUP did not complete\n");
4249 cnic_kwq_completion(dev, 1); 4478 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4250 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); 4479 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
4251 } 4480 }
4252} 4481}
4253 4482
4254static void cnic_shutdown_rings(struct cnic_dev *dev) 4483static void cnic_shutdown_rings(struct cnic_dev *dev)
4255{ 4484{
4485 struct cnic_local *cp = dev->cnic_priv;
4486
4487 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4488 return;
4489
4256 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4490 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4257 cnic_shutdown_bnx2_rx_ring(dev); 4491 cnic_shutdown_bnx2_rx_ring(dev);
4258 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4492 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
@@ -4260,6 +4494,7 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4260 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4494 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4261 union l5cm_specific_data l5_data; 4495 union l5cm_specific_data l5_data;
4262 int i; 4496 int i;
4497 u32 type;
4263 4498
4264 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 4499 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
4265 4500
@@ -4277,14 +4512,18 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4277 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4512 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4278 netdev_err(dev->netdev, 4513 netdev_err(dev->netdev,
4279 "iSCSI CLIENT_HALT did not complete\n"); 4514 "iSCSI CLIENT_HALT did not complete\n");
4280 cnic_kwq_completion(dev, 1); 4515 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4281 4516
4282 memset(&l5_data, 0, sizeof(l5_data)); 4517 memset(&l5_data, 0, sizeof(l5_data));
4283 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 4518 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4284 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE | 4519 & SPE_HDR_CONN_TYPE;
4285 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data); 4520 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4521 SPE_HDR_FUNCTION_ID);
4522 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
4523 BNX2X_ISCSI_L2_CID, type, &l5_data);
4286 msleep(10); 4524 msleep(10);
4287 } 4525 }
4526 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4288} 4527}
4289 4528
4290static int cnic_register_netdev(struct cnic_dev *dev) 4529static int cnic_register_netdev(struct cnic_dev *dev)
@@ -4327,7 +4566,6 @@ static int cnic_start_hw(struct cnic_dev *dev)
4327 return -EALREADY; 4566 return -EALREADY;
4328 4567
4329 dev->regview = ethdev->io_base; 4568 dev->regview = ethdev->io_base;
4330 cp->chip_id = ethdev->chip_id;
4331 pci_dev_get(dev->pcidev); 4569 pci_dev_get(dev->pcidev);
4332 cp->func = PCI_FUNC(dev->pcidev->devfn); 4570 cp->func = PCI_FUNC(dev->pcidev->devfn);
4333 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 4571 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
@@ -4379,17 +4617,11 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
4379static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 4617static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
4380{ 4618{
4381 struct cnic_local *cp = dev->cnic_priv; 4619 struct cnic_local *cp = dev->cnic_priv;
4382 u8 sb_id = cp->status_blk_num;
4383 int port = CNIC_PORT(cp);
4384 4620
4385 cnic_free_irq(dev); 4621 cnic_free_irq(dev);
4386 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4622 *cp->kcq1.hw_prod_idx_ptr = 0;
4387 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4388 offsetof(struct cstorm_status_block_c,
4389 index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
4390 0);
4391 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4623 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4392 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0); 4624 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
4393 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 4625 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
4394 cnic_free_resc(dev); 4626 cnic_free_resc(dev);
4395} 4627}
@@ -4403,10 +4635,11 @@ static void cnic_stop_hw(struct cnic_dev *dev)
4403 /* Need to wait for the ring shutdown event to complete 4635 /* Need to wait for the ring shutdown event to complete
4404 * before clearing the CNIC_UP flag. 4636 * before clearing the CNIC_UP flag.
4405 */ 4637 */
4406 while (cp->uio_dev != -1 && i < 15) { 4638 while (cp->udev->uio_dev != -1 && i < 15) {
4407 msleep(100); 4639 msleep(100);
4408 i++; 4640 i++;
4409 } 4641 }
4642 cnic_shutdown_rings(dev);
4410 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 4643 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
4411 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 4644 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
4412 synchronize_rcu(); 4645 synchronize_rcu();
@@ -4455,7 +4688,6 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
4455 4688
4456 cp = cdev->cnic_priv; 4689 cp = cdev->cnic_priv;
4457 cp->dev = cdev; 4690 cp->dev = cdev;
4458 cp->uio_dev = -1;
4459 cp->l2_single_buf_size = 0x400; 4691 cp->l2_single_buf_size = 0x400;
4460 cp->l2_rx_ring_size = 3; 4692 cp->l2_rx_ring_size = 3;
4461 4693
@@ -4510,6 +4742,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
4510 cp = cdev->cnic_priv; 4742 cp = cdev->cnic_priv;
4511 cp->ethdev = ethdev; 4743 cp->ethdev = ethdev;
4512 cdev->pcidev = pdev; 4744 cdev->pcidev = pdev;
4745 cp->chip_id = ethdev->chip_id;
4513 4746
4514 cp->cnic_ops = &cnic_bnx2_ops; 4747 cp->cnic_ops = &cnic_bnx2_ops;
4515 cp->start_hw = cnic_start_bnx2_hw; 4748 cp->start_hw = cnic_start_bnx2_hw;
@@ -4564,6 +4797,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
4564 cp = cdev->cnic_priv; 4797 cp = cdev->cnic_priv;
4565 cp->ethdev = ethdev; 4798 cp->ethdev = ethdev;
4566 cdev->pcidev = pdev; 4799 cdev->pcidev = pdev;
4800 cp->chip_id = ethdev->chip_id;
4567 4801
4568 cp->cnic_ops = &cnic_bnx2x_ops; 4802 cp->cnic_ops = &cnic_bnx2x_ops;
4569 cp->start_hw = cnic_start_bnx2x_hw; 4803 cp->start_hw = cnic_start_bnx2x_hw;
@@ -4575,7 +4809,10 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
4575 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 4809 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
4576 cp->enable_int = cnic_enable_bnx2x_int; 4810 cp->enable_int = cnic_enable_bnx2x_int;
4577 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 4811 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
4578 cp->ack_int = cnic_ack_bnx2x_msix; 4812 if (BNX2X_CHIP_IS_E2(cp->chip_id))
4813 cp->ack_int = cnic_ack_bnx2x_e2_msix;
4814 else
4815 cp->ack_int = cnic_ack_bnx2x_msix;
4579 cp->close_conn = cnic_close_bnx2x_conn; 4816 cp->close_conn = cnic_close_bnx2x_conn;
4580 cp->next_idx = cnic_bnx2x_next_idx; 4817 cp->next_idx = cnic_bnx2x_next_idx;
4581 cp->hw_idx = cnic_bnx2x_hw_idx; 4818 cp->hw_idx = cnic_bnx2x_hw_idx;
@@ -4683,6 +4920,7 @@ static struct notifier_block cnic_netdev_notifier = {
4683static void cnic_release(void) 4920static void cnic_release(void)
4684{ 4921{
4685 struct cnic_dev *dev; 4922 struct cnic_dev *dev;
4923 struct cnic_uio_dev *udev;
4686 4924
4687 while (!list_empty(&cnic_dev_list)) { 4925 while (!list_empty(&cnic_dev_list)) {
4688 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 4926 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
@@ -4696,6 +4934,11 @@ static void cnic_release(void)
4696 list_del_init(&dev->list); 4934 list_del_init(&dev->list);
4697 cnic_free_dev(dev); 4935 cnic_free_dev(dev);
4698 } 4936 }
4937 while (!list_empty(&cnic_udev_list)) {
4938 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
4939 list);
4940 cnic_free_uio(udev);
4941 }
4699} 4942}
4700 4943
4701static int __init cnic_init(void) 4944static int __init cnic_init(void)
@@ -4710,6 +4953,13 @@ static int __init cnic_init(void)
4710 return rc; 4953 return rc;
4711 } 4954 }
4712 4955
4956 cnic_wq = create_singlethread_workqueue("cnic_wq");
4957 if (!cnic_wq) {
4958 cnic_release();
4959 unregister_netdevice_notifier(&cnic_netdev_notifier);
4960 return -ENOMEM;
4961 }
4962
4713 return 0; 4963 return 0;
4714} 4964}
4715 4965
@@ -4717,6 +4967,7 @@ static void __exit cnic_exit(void)
4717{ 4967{
4718 unregister_netdevice_notifier(&cnic_netdev_notifier); 4968 unregister_netdevice_notifier(&cnic_netdev_notifier);
4719 cnic_release(); 4969 cnic_release();
4970 destroy_workqueue(cnic_wq);
4720} 4971}
4721 4972
4722module_init(cnic_init); 4973module_init(cnic_init);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 275c36114d85..6a4a0ae5cfe3 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -12,6 +12,13 @@
12#ifndef CNIC_H 12#ifndef CNIC_H
13#define CNIC_H 13#define CNIC_H
14 14
15#define HC_INDEX_ISCSI_EQ_CONS 6
16
17#define HC_INDEX_FCOE_EQ_CONS 3
18
19#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
20#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
21
15#define KWQ_PAGE_CNT 4 22#define KWQ_PAGE_CNT 4
16#define KCQ_PAGE_CNT 16 23#define KCQ_PAGE_CNT 16
17 24
@@ -161,8 +168,9 @@ struct cnic_context {
161 wait_queue_head_t waitq; 168 wait_queue_head_t waitq;
162 int wait_cond; 169 int wait_cond;
163 unsigned long timestamp; 170 unsigned long timestamp;
164 u32 ctx_flags; 171 unsigned long ctx_flags;
165#define CTX_FL_OFFLD_START 0x00000001 172#define CTX_FL_OFFLD_START 0
173#define CTX_FL_DELETE_WAIT 1
166 u8 ulp_proto_id; 174 u8 ulp_proto_id;
167 union { 175 union {
168 struct cnic_iscsi *iscsi; 176 struct cnic_iscsi *iscsi;
@@ -179,6 +187,31 @@ struct kcq_info {
179 u32 io_addr; 187 u32 io_addr;
180}; 188};
181 189
190struct iro {
191 u32 base;
192 u16 m1;
193 u16 m2;
194 u16 m3;
195 u16 size;
196};
197
198struct cnic_uio_dev {
199 struct uio_info cnic_uinfo;
200 u32 uio_dev;
201
202 int l2_ring_size;
203 void *l2_ring;
204 dma_addr_t l2_ring_map;
205
206 int l2_buf_size;
207 void *l2_buf;
208 dma_addr_t l2_buf_map;
209
210 struct cnic_dev *dev;
211 struct pci_dev *pdev;
212 struct list_head list;
213};
214
182struct cnic_local { 215struct cnic_local {
183 216
184 spinlock_t cnic_ulp_lock; 217 spinlock_t cnic_ulp_lock;
@@ -192,19 +225,15 @@ struct cnic_local {
192 unsigned long cnic_local_flags; 225 unsigned long cnic_local_flags;
193#define CNIC_LCL_FL_KWQ_INIT 0x0 226#define CNIC_LCL_FL_KWQ_INIT 0x0
194#define CNIC_LCL_FL_L2_WAIT 0x1 227#define CNIC_LCL_FL_L2_WAIT 0x1
228#define CNIC_LCL_FL_RINGS_INITED 0x2
195 229
196 struct cnic_dev *dev; 230 struct cnic_dev *dev;
197 231
198 struct cnic_eth_dev *ethdev; 232 struct cnic_eth_dev *ethdev;
199 233
200 void *l2_ring; 234 struct cnic_uio_dev *udev;
201 dma_addr_t l2_ring_map;
202 int l2_ring_size;
203 int l2_rx_ring_size;
204 235
205 void *l2_buf; 236 int l2_rx_ring_size;
206 dma_addr_t l2_buf_map;
207 int l2_buf_size;
208 int l2_single_buf_size; 237 int l2_single_buf_size;
209 238
210 u16 *rx_cons_ptr; 239 u16 *rx_cons_ptr;
@@ -212,6 +241,9 @@ struct cnic_local {
212 u16 rx_cons; 241 u16 rx_cons;
213 u16 tx_cons; 242 u16 tx_cons;
214 243
244 struct iro *iro_arr;
245#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
246
215 struct cnic_dma kwq_info; 247 struct cnic_dma kwq_info;
216 struct kwqe **kwq; 248 struct kwqe **kwq;
217 249
@@ -230,12 +262,16 @@ struct cnic_local {
230 union { 262 union {
231 void *gen; 263 void *gen;
232 struct status_block_msix *bnx2; 264 struct status_block_msix *bnx2;
233 struct host_status_block *bnx2x; 265 struct host_hc_status_block_e1x *bnx2x_e1x;
266 /* index values - which counter to update */
267 #define SM_RX_ID 0
268 #define SM_TX_ID 1
234 } status_blk; 269 } status_blk;
235 270
236 struct host_def_status_block *bnx2x_def_status_blk; 271 struct host_sp_status_block *bnx2x_def_status_blk;
237 272
238 u32 status_blk_num; 273 u32 status_blk_num;
274 u32 bnx2x_igu_sb_id;
239 u32 int_num; 275 u32 int_num;
240 u32 last_status_idx; 276 u32 last_status_idx;
241 struct tasklet_struct cnic_irq_task; 277 struct tasklet_struct cnic_irq_task;
@@ -264,6 +300,8 @@ struct cnic_local {
264 int hq_size; 300 int hq_size;
265 int num_cqs; 301 int num_cqs;
266 302
303 struct delayed_work delete_task;
304
267 struct cnic_ctx *ctx_arr; 305 struct cnic_ctx *ctx_arr;
268 int ctx_blks; 306 int ctx_blks;
269 int ctx_blk_size; 307 int ctx_blk_size;
@@ -272,11 +310,9 @@ struct cnic_local {
272 310
273 u32 chip_id; 311 u32 chip_id;
274 int func; 312 int func;
313 u32 pfid;
275 u32 shmem_base; 314 u32 shmem_base;
276 315
277 u32 uio_dev;
278 struct uio_info *cnic_uinfo;
279
280 struct cnic_ops *cnic_ops; 316 struct cnic_ops *cnic_ops;
281 int (*start_hw)(struct cnic_dev *); 317 int (*start_hw)(struct cnic_dev *);
282 void (*stop_hw)(struct cnic_dev *); 318 void (*stop_hw)(struct cnic_dev *);
@@ -335,18 +371,36 @@ struct bnx2x_bd_chain_next {
335#define BNX2X_ISCSI_GLB_BUF_SIZE 64 371#define BNX2X_ISCSI_GLB_BUF_SIZE 64
336#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff 372#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
337#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff 373#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
338#define BNX2X_HW_CID(x, func) ((x) | (((func) % PORT_MAX) << 23) | \ 374
339 (((func) >> 1) << 17)) 375#define BNX2X_CHIP_NUM_57710 0x164e
340#define BNX2X_SW_CID(x) (x & 0x1ffff)
341#define BNX2X_CHIP_NUM_57711 0x164f 376#define BNX2X_CHIP_NUM_57711 0x164f
342#define BNX2X_CHIP_NUM_57711E 0x1650 377#define BNX2X_CHIP_NUM_57711E 0x1650
378#define BNX2X_CHIP_NUM_57712 0x1662
379#define BNX2X_CHIP_NUM_57712E 0x1663
380#define BNX2X_CHIP_NUM_57713 0x1651
381#define BNX2X_CHIP_NUM_57713E 0x1652
382
343#define BNX2X_CHIP_NUM(x) (x >> 16) 383#define BNX2X_CHIP_NUM(x) (x >> 16)
384#define BNX2X_CHIP_IS_57710(x) \
385 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
344#define BNX2X_CHIP_IS_57711(x) \ 386#define BNX2X_CHIP_IS_57711(x) \
345 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711) 387 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
346#define BNX2X_CHIP_IS_57711E(x) \ 388#define BNX2X_CHIP_IS_57711E(x) \
347 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E) 389 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
348#define BNX2X_CHIP_IS_E1H(x) \ 390#define BNX2X_CHIP_IS_E1H(x) \
349 (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x)) 391 (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
392#define BNX2X_CHIP_IS_57712(x) \
393 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
394#define BNX2X_CHIP_IS_57712E(x) \
395 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
396#define BNX2X_CHIP_IS_57713(x) \
397 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
398#define BNX2X_CHIP_IS_57713E(x) \
399 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
400#define BNX2X_CHIP_IS_E2(x) \
401 (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
402 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
403
350#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) 404#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
351 405
352#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd)) 406#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -358,19 +412,35 @@ struct bnx2x_bd_chain_next {
358 (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \ 412 (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
359 ((x) + 2) : ((x) + 1) 413 ((x) + 2) : ((x) + 1)
360 414
361#define BNX2X_DEF_SB_ID 16 415#define BNX2X_DEF_SB_ID HC_SP_SB_ID
362 416
363#define BNX2X_ISCSI_RX_SB_INDEX_NUM \ 417#define BNX2X_SHMEM_MF_BLK_OFFSET 0x7e4
364 ((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
365 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
366 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
367 418
368#define BNX2X_SHMEM_ADDR(base, field) (base + \ 419#define BNX2X_SHMEM_ADDR(base, field) (base + \
369 offsetof(struct shmem_region, field)) 420 offsetof(struct shmem_region, field))
370 421
371#define CNIC_PORT(cp) ((cp)->func % PORT_MAX) 422#define BNX2X_SHMEM2_ADDR(base, field) (base + \
423 offsetof(struct shmem2_region, field))
424
425#define BNX2X_SHMEM2_HAS(base, field) \
426 ((base) && \
427 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
428 offsetof(struct shmem2_region, field)))
429
430#define CNIC_PORT(cp) ((cp)->pfid & 1)
372#define CNIC_FUNC(cp) ((cp)->func) 431#define CNIC_FUNC(cp) ((cp)->func)
373#define CNIC_E1HVN(cp) ((cp)->func >> 1) 432#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
433 (CNIC_FUNC(cp) & 1))
434#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
435
436#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
437 (CNIC_E1HVN(cp) << 17) | (x))
438
439#define BNX2X_SW_CID(x) (x & 0x1ffff)
440
441#define BNX2X_CL_QZONE_ID(cp, cli) \
442 (cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
374 443
444#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
375#endif 445#endif
376 446
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 7ce694d41b6b..328e8b2765a3 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -14,6 +14,7 @@
14 14
15/* KWQ (kernel work queue) request op codes */ 15/* KWQ (kernel work queue) request op codes */
16#define L2_KWQE_OPCODE_VALUE_FLUSH (4) 16#define L2_KWQE_OPCODE_VALUE_FLUSH (4)
17#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE (8)
17 18
18#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) 19#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
19#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) 20#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
@@ -48,11 +49,14 @@
48#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) 49#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)
49 50
50/* KCQ (kernel completion queue) completion status */ 51/* KCQ (kernel completion queue) completion status */
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) 52#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) 53#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53 54
54#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83) 55#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
55#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89) 56#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
57
58#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
59#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH (0xa1)
56 60
57#define L4_LAYER_CODE (4) 61#define L4_LAYER_CODE (4)
58#define L2_LAYER_CODE (2) 62#define L2_LAYER_CODE (2)
@@ -585,6 +589,100 @@ struct l4_kwq_upload {
585 */ 589 */
586 590
587/* 591/*
592 * The iscsi aggregative context of Cstorm
593 */
594struct cstorm_iscsi_ag_context {
595 u32 agg_vars1;
596#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
597#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
598#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
599#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
600#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
601#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
602#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
603#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
604#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
605#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
606#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
607#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
608#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
609#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
610#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
611#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
612#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
613#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
614#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
615#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
616#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
617#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
618#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
619#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
620#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
621#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
622#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
623#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
624#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
625#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
626#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
627#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
628#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
629#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
630#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
631#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
632#if defined(__BIG_ENDIAN)
633 u8 __aux1_th;
634 u8 __aux1_val;
635 u16 __agg_vars2;
636#elif defined(__LITTLE_ENDIAN)
637 u16 __agg_vars2;
638 u8 __aux1_val;
639 u8 __aux1_th;
640#endif
641 u32 rel_seq;
642 u32 rel_seq_th;
643#if defined(__BIG_ENDIAN)
644 u16 hq_cons;
645 u16 hq_prod;
646#elif defined(__LITTLE_ENDIAN)
647 u16 hq_prod;
648 u16 hq_cons;
649#endif
650#if defined(__BIG_ENDIAN)
651 u8 __reserved62;
652 u8 __reserved61;
653 u8 __reserved60;
654 u8 __reserved59;
655#elif defined(__LITTLE_ENDIAN)
656 u8 __reserved59;
657 u8 __reserved60;
658 u8 __reserved61;
659 u8 __reserved62;
660#endif
661#if defined(__BIG_ENDIAN)
662 u16 __reserved64;
663 u16 __cq_u_prod0;
664#elif defined(__LITTLE_ENDIAN)
665 u16 __cq_u_prod0;
666 u16 __reserved64;
667#endif
668 u32 __cq_u_prod1;
669#if defined(__BIG_ENDIAN)
670 u16 __agg_vars3;
671 u16 __cq_u_prod2;
672#elif defined(__LITTLE_ENDIAN)
673 u16 __cq_u_prod2;
674 u16 __agg_vars3;
675#endif
676#if defined(__BIG_ENDIAN)
677 u16 __aux2_th;
678 u16 __cq_u_prod3;
679#elif defined(__LITTLE_ENDIAN)
680 u16 __cq_u_prod3;
681 u16 __aux2_th;
682#endif
683};
684
685/*
588 * iSCSI context region, used only in iSCSI 686 * iSCSI context region, used only in iSCSI
589 */ 687 */
590struct ustorm_iscsi_rq_db { 688struct ustorm_iscsi_rq_db {
@@ -696,7 +794,7 @@ struct ustorm_iscsi_st_context {
696 struct regpair task_pbl_base; 794 struct regpair task_pbl_base;
697 struct regpair tce_phy_addr; 795 struct regpair tce_phy_addr;
698 struct ustorm_iscsi_placement_db place_db; 796 struct ustorm_iscsi_placement_db place_db;
699 u32 data_rcv_seq; 797 u32 reserved8;
700 u32 rem_rcv_len; 798 u32 rem_rcv_len;
701#if defined(__BIG_ENDIAN) 799#if defined(__BIG_ENDIAN)
702 u16 hdr_itt; 800 u16 hdr_itt;
@@ -713,8 +811,10 @@ struct ustorm_iscsi_st_context {
713#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 811#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
714#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) 812#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
715#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 813#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
716#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2) 814#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
717#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2 815#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
816#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
817#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
718 u8 task_pdu_cache_index; 818 u8 task_pdu_cache_index;
719 u8 task_pbe_cache_index; 819 u8 task_pbe_cache_index;
720#elif defined(__LITTLE_ENDIAN) 820#elif defined(__LITTLE_ENDIAN)
@@ -725,8 +825,10 @@ struct ustorm_iscsi_st_context {
725#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0 825#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
726#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1) 826#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
727#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1 827#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
728#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2) 828#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
729#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2 829#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
830#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
831#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
730 u8 hdr_second_byte_union; 832 u8 hdr_second_byte_union;
731#endif 833#endif
732#if defined(__BIG_ENDIAN) 834#if defined(__BIG_ENDIAN)
@@ -777,14 +879,14 @@ struct ustorm_iscsi_st_context {
777 */ 879 */
778struct tstorm_tcp_st_context_section { 880struct tstorm_tcp_st_context_section {
779 u32 flags1; 881 u32 flags1;
780#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0) 882#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
781#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0 883#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
782#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24) 884#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
783#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24 885#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
784#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25) 886#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
785#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25 887#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
786#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26) 888#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
787#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26 889#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
788#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27) 890#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
789#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27 891#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
790#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28) 892#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
@@ -793,11 +895,11 @@ struct tstorm_tcp_st_context_section {
793#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29 895#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
794#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30) 896#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
795#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30 897#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
796#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31) 898#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
797#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31 899#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
798 u32 flags2; 900 u32 flags2;
799#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0) 901#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
800#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0 902#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
801#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24) 903#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
802#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24 904#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
803#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25) 905#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
@@ -810,18 +912,18 @@ struct tstorm_tcp_st_context_section {
810#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28 912#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
811#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29) 913#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
812#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29 914#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
813#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30) 915#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
814#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30 916#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
815#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31) 917#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
816#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31 918#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
817#if defined(__BIG_ENDIAN) 919#if defined(__BIG_ENDIAN)
818 u16 reserved_slowpath; 920 u16 mss;
819 u8 tcp_sm_state_3b; 921 u8 tcp_sm_state;
820 u8 rto_exp_3b; 922 u8 rto_exp;
821#elif defined(__LITTLE_ENDIAN) 923#elif defined(__LITTLE_ENDIAN)
822 u8 rto_exp_3b; 924 u8 rto_exp;
823 u8 tcp_sm_state_3b; 925 u8 tcp_sm_state;
824 u16 reserved_slowpath; 926 u16 mss;
825#endif 927#endif
826 u32 rcv_nxt; 928 u32 rcv_nxt;
827 u32 timestamp_recent; 929 u32 timestamp_recent;
@@ -846,11 +948,11 @@ struct tstorm_tcp_st_context_section {
846#if defined(__BIG_ENDIAN) 948#if defined(__BIG_ENDIAN)
847 u8 statistics_counter_id; 949 u8 statistics_counter_id;
848 u8 ooo_support_mode; 950 u8 ooo_support_mode;
849 u8 snd_wnd_scale_4b; 951 u8 snd_wnd_scale;
850 u8 dup_ack_count; 952 u8 dup_ack_count;
851#elif defined(__LITTLE_ENDIAN) 953#elif defined(__LITTLE_ENDIAN)
852 u8 dup_ack_count; 954 u8 dup_ack_count;
853 u8 snd_wnd_scale_4b; 955 u8 snd_wnd_scale;
854 u8 ooo_support_mode; 956 u8 ooo_support_mode;
855 u8 statistics_counter_id; 957 u8 statistics_counter_id;
856#endif 958#endif
@@ -860,13 +962,21 @@ struct tstorm_tcp_st_context_section {
860 u32 isle_start_seq; 962 u32 isle_start_seq;
861 u32 isle_end_seq; 963 u32 isle_end_seq;
862#if defined(__BIG_ENDIAN) 964#if defined(__BIG_ENDIAN)
863 u16 mss; 965 u16 second_isle_address;
864 u16 recent_seg_wnd; 966 u16 recent_seg_wnd;
865#elif defined(__LITTLE_ENDIAN) 967#elif defined(__LITTLE_ENDIAN)
866 u16 recent_seg_wnd; 968 u16 recent_seg_wnd;
867 u16 mss; 969 u16 second_isle_address;
970#endif
971#if defined(__BIG_ENDIAN)
972 u8 max_isles_ever_happened;
973 u8 isles_number;
974 u16 last_isle_address;
975#elif defined(__LITTLE_ENDIAN)
976 u16 last_isle_address;
977 u8 isles_number;
978 u8 max_isles_ever_happened;
868#endif 979#endif
869 u32 reserved4;
870 u32 max_rt_time; 980 u32 max_rt_time;
871#if defined(__BIG_ENDIAN) 981#if defined(__BIG_ENDIAN)
872 u16 lsb_mac_address; 982 u16 lsb_mac_address;
@@ -876,7 +986,7 @@ struct tstorm_tcp_st_context_section {
876 u16 lsb_mac_address; 986 u16 lsb_mac_address;
877#endif 987#endif
878 u32 msb_mac_address; 988 u32 msb_mac_address;
879 u32 reserved2; 989 u32 rightmost_received_seq;
880}; 990};
881 991
882/* 992/*
@@ -951,7 +1061,7 @@ struct tstorm_iscsi_st_context_section {
951 u8 scratchpad_idx; 1061 u8 scratchpad_idx;
952 struct iscsi_term_vars term_vars; 1062 struct iscsi_term_vars term_vars;
953#endif 1063#endif
954 u32 reserved2; 1064 u32 process_nxt;
955}; 1065};
956 1066
957/* 1067/*
@@ -1174,24 +1284,12 @@ struct xstorm_iscsi_ag_context {
1174#endif 1284#endif
1175#if defined(__BIG_ENDIAN) 1285#if defined(__BIG_ENDIAN)
1176 u8 cdu_reserved; 1286 u8 cdu_reserved;
1177 u8 agg_vars4; 1287 u8 __agg_vars4;
1178#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1179#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1180#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1181#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1182#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1183#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1184#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1185#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1186#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1187#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1188#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1189#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1190 u8 agg_vars3; 1288 u8 agg_vars3;
1191#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) 1289#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1192#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 1290#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1193#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6) 1291#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
1194#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6 1292#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
1195 u8 agg_vars2; 1293 u8 agg_vars2;
1196#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0) 1294#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
1197#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0 1295#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
@@ -1222,21 +1320,9 @@ struct xstorm_iscsi_ag_context {
1222 u8 agg_vars3; 1320 u8 agg_vars3;
1223#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0) 1321#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1224#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0 1322#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1225#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6) 1323#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
1226#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6 1324#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
1227 u8 agg_vars4; 1325 u8 __agg_vars4;
1228#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1229#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1230#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1231#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1232#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1233#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1234#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1235#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1236#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1237#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1238#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1239#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1240 u8 cdu_reserved; 1326 u8 cdu_reserved;
1241#endif 1327#endif
1242 u32 more_to_send; 1328 u32 more_to_send;
@@ -1270,8 +1356,8 @@ struct xstorm_iscsi_ag_context {
1270#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 1356#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1271#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) 1357#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1272#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 1358#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1273#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4) 1359#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
1274#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4 1360#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
1275#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) 1361#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1276#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 1362#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1277#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) 1363#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@@ -1286,8 +1372,8 @@ struct xstorm_iscsi_ag_context {
1286#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 1372#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1287#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) 1373#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1288#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 1374#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1289#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15) 1375#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
1290#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15 1376#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
1291 u8 agg_val3_th; 1377 u8 agg_val3_th;
1292 u8 agg_vars6; 1378 u8 agg_vars6;
1293#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0) 1379#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
@@ -1310,8 +1396,8 @@ struct xstorm_iscsi_ag_context {
1310#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0 1396#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1311#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3) 1397#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1312#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3 1398#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1313#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4) 1399#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
1314#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4 1400#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
1315#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6) 1401#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1316#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6 1402#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1317#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8) 1403#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
@@ -1326,14 +1412,14 @@ struct xstorm_iscsi_ag_context {
1326#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13 1412#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1327#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14) 1413#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1328#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14 1414#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1329#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15) 1415#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
1330#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15 1416#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
1331#endif 1417#endif
1332#if defined(__BIG_ENDIAN) 1418#if defined(__BIG_ENDIAN)
1333 u16 __agg_val11_th; 1419 u16 __agg_val11_th;
1334 u16 __agg_val11; 1420 u16 __gen_data;
1335#elif defined(__LITTLE_ENDIAN) 1421#elif defined(__LITTLE_ENDIAN)
1336 u16 __agg_val11; 1422 u16 __gen_data;
1337 u16 __agg_val11_th; 1423 u16 __agg_val11_th;
1338#endif 1424#endif
1339#if defined(__BIG_ENDIAN) 1425#if defined(__BIG_ENDIAN)
@@ -1384,7 +1470,7 @@ struct xstorm_iscsi_ag_context {
1384#endif 1470#endif
1385 u32 hq_cons_tcp_seq; 1471 u32 hq_cons_tcp_seq;
1386 u32 exp_stat_sn; 1472 u32 exp_stat_sn;
1387 u32 agg_misc5; 1473 u32 rst_seq_num;
1388}; 1474};
1389 1475
1390/* 1476/*
@@ -1478,12 +1564,12 @@ struct tstorm_iscsi_ag_context {
1478#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 1564#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1479#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) 1565#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1480#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 1566#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1481#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4) 1567#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
1482#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4 1568#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
1483#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) 1569#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1484#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 1570#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1485#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7) 1571#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
1486#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7 1572#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
1487 u8 state; 1573 u8 state;
1488#elif defined(__LITTLE_ENDIAN) 1574#elif defined(__LITTLE_ENDIAN)
1489 u8 state; 1575 u8 state;
@@ -1496,63 +1582,63 @@ struct tstorm_iscsi_ag_context {
1496#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2 1582#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1497#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3) 1583#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1498#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3 1584#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1499#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4) 1585#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
1500#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4 1586#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
1501#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6) 1587#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1502#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6 1588#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1503#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7) 1589#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
1504#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7 1590#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
1505 u16 ulp_credit; 1591 u16 ulp_credit;
1506#endif 1592#endif
1507#if defined(__BIG_ENDIAN) 1593#if defined(__BIG_ENDIAN)
1508 u16 __agg_val4; 1594 u16 __agg_val4;
1509 u16 agg_vars2; 1595 u16 agg_vars2;
1510#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0) 1596#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
1511#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0 1597#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
1512#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1) 1598#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
1513#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1 1599#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
1514#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2) 1600#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
1515#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2 1601#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
1516#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4) 1602#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
1517#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4 1603#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
1518#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) 1604#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
1519#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 1605#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
1520#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) 1606#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
1521#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 1607#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
1522#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) 1608#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1523#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 1609#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1524#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11) 1610#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
1525#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11 1611#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
1526#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12) 1612#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
1527#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 1613#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
1528#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13) 1614#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
1529#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 1615#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
1530#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) 1616#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1531#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 1617#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1532#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) 1618#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
1533#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15 1619#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
1534#elif defined(__LITTLE_ENDIAN) 1620#elif defined(__LITTLE_ENDIAN)
1535 u16 agg_vars2; 1621 u16 agg_vars2;
1536#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0) 1622#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
1537#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0 1623#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
1538#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1) 1624#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
1539#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1 1625#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
1540#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2) 1626#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
1541#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2 1627#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
1542#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4) 1628#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
1543#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4 1629#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
1544#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6) 1630#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
1545#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6 1631#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
1546#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8) 1632#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
1547#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8 1633#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
1548#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10) 1634#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1549#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10 1635#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1550#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11) 1636#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
1551#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11 1637#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
1552#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12) 1638#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
1553#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12 1639#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
1554#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13) 1640#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
1555#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13 1641#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
1556#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14) 1642#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1557#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14 1643#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1558#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15) 1644#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
@@ -1563,100 +1649,6 @@ struct tstorm_iscsi_ag_context {
1563}; 1649};
1564 1650
1565/* 1651/*
1566 * The iscsi aggregative context of Cstorm
1567 */
1568struct cstorm_iscsi_ag_context {
1569 u32 agg_vars1;
1570#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
1571#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
1572#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
1573#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
1574#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
1575#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
1576#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
1577#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
1578#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
1579#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
1580#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
1581#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
1582#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
1583#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
1584#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
1585#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
1586#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
1587#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
1588#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
1589#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
1590#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
1591#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
1592#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
1593#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
1594#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
1595#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
1596#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
1597#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
1598#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
1599#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
1600#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
1601#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
1602#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
1603#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
1604#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
1605#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
1606#if defined(__BIG_ENDIAN)
1607 u8 __aux1_th;
1608 u8 __aux1_val;
1609 u16 __agg_vars2;
1610#elif defined(__LITTLE_ENDIAN)
1611 u16 __agg_vars2;
1612 u8 __aux1_val;
1613 u8 __aux1_th;
1614#endif
1615 u32 rel_seq;
1616 u32 rel_seq_th;
1617#if defined(__BIG_ENDIAN)
1618 u16 hq_cons;
1619 u16 hq_prod;
1620#elif defined(__LITTLE_ENDIAN)
1621 u16 hq_prod;
1622 u16 hq_cons;
1623#endif
1624#if defined(__BIG_ENDIAN)
1625 u8 __reserved62;
1626 u8 __reserved61;
1627 u8 __reserved60;
1628 u8 __reserved59;
1629#elif defined(__LITTLE_ENDIAN)
1630 u8 __reserved59;
1631 u8 __reserved60;
1632 u8 __reserved61;
1633 u8 __reserved62;
1634#endif
1635#if defined(__BIG_ENDIAN)
1636 u16 __reserved64;
1637 u16 __cq_u_prod0;
1638#elif defined(__LITTLE_ENDIAN)
1639 u16 __cq_u_prod0;
1640 u16 __reserved64;
1641#endif
1642 u32 __cq_u_prod1;
1643#if defined(__BIG_ENDIAN)
1644 u16 __agg_vars3;
1645 u16 __cq_u_prod2;
1646#elif defined(__LITTLE_ENDIAN)
1647 u16 __cq_u_prod2;
1648 u16 __agg_vars3;
1649#endif
1650#if defined(__BIG_ENDIAN)
1651 u16 __aux2_th;
1652 u16 __cq_u_prod3;
1653#elif defined(__LITTLE_ENDIAN)
1654 u16 __cq_u_prod3;
1655 u16 __aux2_th;
1656#endif
1657};
1658
1659/*
1660 * The iscsi aggregative context of Ustorm 1652 * The iscsi aggregative context of Ustorm
1661 */ 1653 */
1662struct ustorm_iscsi_ag_context { 1654struct ustorm_iscsi_ag_context {
@@ -1746,8 +1738,8 @@ struct ustorm_iscsi_ag_context {
1746#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 1738#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
1747#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) 1739#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1748#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 1740#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1749#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) 1741#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
1750#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 1742#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
1751#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) 1743#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
1752#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 1744#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
1753 u8 decision_rule_enable_bits; 1745 u8 decision_rule_enable_bits;
@@ -1790,8 +1782,8 @@ struct ustorm_iscsi_ag_context {
1790#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0 1782#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
1791#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3) 1783#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1792#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3 1784#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1793#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6) 1785#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
1794#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6 1786#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
1795#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7) 1787#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
1796#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7 1788#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
1797 u16 __reserved2; 1789 u16 __reserved2;
@@ -1799,22 +1791,6 @@ struct ustorm_iscsi_ag_context {
1799}; 1791};
1800 1792
1801/* 1793/*
1802 * Timers connection context
1803 */
1804struct iscsi_timers_block_context {
1805 u32 __reserved_0;
1806 u32 __reserved_1;
1807 u32 __reserved_2;
1808 u32 flags;
1809#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
1810#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
1811#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
1812#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
1813#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
1814#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
1815};
1816
1817/*
1818 * Ethernet context section, shared in TOE, RDMA and ISCSI 1794 * Ethernet context section, shared in TOE, RDMA and ISCSI
1819 */ 1795 */
1820struct xstorm_eth_context_section { 1796struct xstorm_eth_context_section {
@@ -1963,7 +1939,7 @@ struct xstorm_tcp_context_section {
1963#endif 1939#endif
1964#if defined(__BIG_ENDIAN) 1940#if defined(__BIG_ENDIAN)
1965 u8 original_nagle_1b; 1941 u8 original_nagle_1b;
1966 u8 ts_enabled_1b; 1942 u8 ts_enabled;
1967 u16 tcp_params; 1943 u16 tcp_params;
1968#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0) 1944#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
1969#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0 1945#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
@@ -1973,8 +1949,8 @@ struct xstorm_tcp_context_section {
1973#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 1949#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1974#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) 1950#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1975#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 1951#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1976#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11) 1952#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
1977#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11 1953#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
1978#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) 1954#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1979#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 1955#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1980#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) 1956#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
@@ -1991,15 +1967,15 @@ struct xstorm_tcp_context_section {
1991#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9 1967#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1992#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10) 1968#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1993#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10 1969#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1994#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11) 1970#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
1995#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11 1971#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
1996#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12) 1972#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1997#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12 1973#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1998#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13) 1974#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
1999#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13 1975#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
2000#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14) 1976#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
2001#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14 1977#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
2002 u8 ts_enabled_1b; 1978 u8 ts_enabled;
2003 u8 original_nagle_1b; 1979 u8 original_nagle_1b;
2004#endif 1980#endif
2005#if defined(__BIG_ENDIAN) 1981#if defined(__BIG_ENDIAN)
@@ -2030,8 +2006,8 @@ struct xstorm_common_context_section {
2030#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 2006#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2031#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) 2007#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2032#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 2008#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2033#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7) 2009#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
2034#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7 2010#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
2035 u8 ip_version_1b; 2011 u8 ip_version_1b;
2036#elif defined(__LITTLE_ENDIAN) 2012#elif defined(__LITTLE_ENDIAN)
2037 u8 ip_version_1b; 2013 u8 ip_version_1b;
@@ -2042,8 +2018,8 @@ struct xstorm_common_context_section {
2042#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1 2018#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2043#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2) 2019#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2044#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2 2020#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2045#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7) 2021#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS (0x1<<7)
2046#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7 2022#define XSTORM_COMMON_CONTEXT_SECTION_DCB_EXISTS_SHIFT 7
2047 u16 reserved; 2023 u16 reserved;
2048#endif 2024#endif
2049}; 2025};
@@ -2284,7 +2260,7 @@ struct iscsi_context {
2284 struct tstorm_iscsi_ag_context tstorm_ag_context; 2260 struct tstorm_iscsi_ag_context tstorm_ag_context;
2285 struct cstorm_iscsi_ag_context cstorm_ag_context; 2261 struct cstorm_iscsi_ag_context cstorm_ag_context;
2286 struct ustorm_iscsi_ag_context ustorm_ag_context; 2262 struct ustorm_iscsi_ag_context ustorm_ag_context;
2287 struct iscsi_timers_block_context timers_context; 2263 struct timers_block_context timers_context;
2288 struct regpair upb_context; 2264 struct regpair upb_context;
2289 struct xstorm_iscsi_st_context xstorm_st_context; 2265 struct xstorm_iscsi_st_context xstorm_st_context;
2290 struct regpair xpb_context; 2266 struct regpair xpb_context;
@@ -2434,16 +2410,16 @@ struct l5cm_packet_size {
2434 * l5cm connection parameters 2410 * l5cm connection parameters
2435 */ 2411 */
2436union l5cm_reduce_param_union { 2412union l5cm_reduce_param_union {
2437 u32 passive_side_scramble_key; 2413 u32 opaque1;
2438 u32 pcs_id; 2414 u32 opaque2;
2439}; 2415};
2440 2416
2441/* 2417/*
2442 * l5cm connection parameters 2418 * l5cm connection parameters
2443 */ 2419 */
2444struct l5cm_reduce_conn { 2420struct l5cm_reduce_conn {
2445 union l5cm_reduce_param_union param; 2421 union l5cm_reduce_param_union opaque1;
2446 u32 isn; 2422 u32 opaque2;
2447}; 2423};
2448 2424
2449/* 2425/*
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 344c842d55ab..0dbeaec4f03a 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.1.3" 15#define CNIC_MODULE_VERSION "2.2.6"
16#define CNIC_MODULE_RELDATE "June 24, 2010" 16#define CNIC_MODULE_RELDATE "Oct 12, 2010"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -80,18 +80,15 @@ struct kcqe {
80#define DRV_CTL_IO_RD_CMD 0x102 80#define DRV_CTL_IO_RD_CMD 0x102
81#define DRV_CTL_CTX_WR_CMD 0x103 81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104 82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105 83#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD 0x105
84#define DRV_CTL_START_L2_CMD 0x106 84#define DRV_CTL_START_L2_CMD 0x106
85#define DRV_CTL_STOP_L2_CMD 0x107 85#define DRV_CTL_STOP_L2_CMD 0x107
86#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
86 87
87struct cnic_ctl_completion { 88struct cnic_ctl_completion {
88 u32 cid; 89 u32 cid;
89}; 90};
90 91
91struct drv_ctl_completion {
92 u32 comp_count;
93};
94
95struct cnic_ctl_info { 92struct cnic_ctl_info {
96 int cmd; 93 int cmd;
97 union { 94 union {
@@ -100,6 +97,10 @@ struct cnic_ctl_info {
100 } data; 97 } data;
101}; 98};
102 99
100struct drv_ctl_spq_credit {
101 u32 credit_count;
102};
103
103struct drv_ctl_io { 104struct drv_ctl_io {
104 u32 cid_addr; 105 u32 cid_addr;
105 u32 offset; 106 u32 offset;
@@ -115,7 +116,7 @@ struct drv_ctl_l2_ring {
115struct drv_ctl_info { 116struct drv_ctl_info {
116 int cmd; 117 int cmd;
117 union { 118 union {
118 struct drv_ctl_completion comp; 119 struct drv_ctl_spq_credit credit;
119 struct drv_ctl_io io; 120 struct drv_ctl_io io;
120 struct drv_ctl_l2_ring ring; 121 struct drv_ctl_l2_ring ring;
121 char bytes[MAX_DRV_CTL_DATA]; 122 char bytes[MAX_DRV_CTL_DATA];
@@ -138,6 +139,7 @@ struct cnic_irq {
138 unsigned int vector; 139 unsigned int vector;
139 void *status_blk; 140 void *status_blk;
140 u32 status_blk_num; 141 u32 status_blk_num;
142 u32 status_blk_num2;
141 u32 irq_flags; 143 u32 irq_flags;
142#define CNIC_IRQ_FL_MSIX 0x00000001 144#define CNIC_IRQ_FL_MSIX 0x00000001
143}; 145};
@@ -152,6 +154,7 @@ struct cnic_eth_dev {
152 struct pci_dev *pdev; 154 struct pci_dev *pdev;
153 void __iomem *io_base; 155 void __iomem *io_base;
154 void __iomem *io_base2; 156 void __iomem *io_base2;
157 void *iro_arr;
155 158
156 u32 ctx_tbl_offset; 159 u32 ctx_tbl_offset;
157 u32 ctx_tbl_len; 160 u32 ctx_tbl_len;
@@ -160,7 +163,9 @@ struct cnic_eth_dev {
160 u32 max_iscsi_conn; 163 u32 max_iscsi_conn;
161 u32 max_fcoe_conn; 164 u32 max_fcoe_conn;
162 u32 max_rdma_conn; 165 u32 max_rdma_conn;
163 u32 reserved0[2]; 166 u32 fcoe_init_cid;
167 u16 iscsi_l2_client_id;
168 u16 iscsi_l2_cid;
164 169
165 int num_irq; 170 int num_irq;
166 struct cnic_irq irq_arr[MAX_CNIC_VEC]; 171 struct cnic_irq irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 4cd7f420766a..ef67be59680f 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -336,9 +336,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
336 int irq_vec_idx, const struct qset_params *p, 336 int irq_vec_idx, const struct qset_params *p,
337 int ntxq, struct net_device *dev, 337 int ntxq, struct net_device *dev,
338 struct netdev_queue *netdevq); 338 struct netdev_queue *netdevq);
339int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
340 unsigned char *data);
341irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
342extern struct workqueue_struct *cxgb3_wq; 339extern struct workqueue_struct *cxgb3_wq;
343 340
344int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size); 341int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index fe08a004b0dd..5ccb77d078aa 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -673,7 +673,6 @@ void t3_xgm_intr_enable(struct adapter *adapter, int idx);
673void t3_xgm_intr_disable(struct adapter *adapter, int idx); 673void t3_xgm_intr_disable(struct adapter *adapter, int idx);
674void t3_port_intr_enable(struct adapter *adapter, int idx); 674void t3_port_intr_enable(struct adapter *adapter, int idx);
675void t3_port_intr_disable(struct adapter *adapter, int idx); 675void t3_port_intr_disable(struct adapter *adapter, int idx);
676void t3_port_intr_clear(struct adapter *adapter, int idx);
677int t3_slow_intr_handler(struct adapter *adapter); 676int t3_slow_intr_handler(struct adapter *adapter);
678int t3_phy_intr_handler(struct adapter *adapter); 677int t3_phy_intr_handler(struct adapter *adapter);
679 678
@@ -689,14 +688,10 @@ int t3_check_tpsram_version(struct adapter *adapter);
689int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram, 688int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
690 unsigned int size); 689 unsigned int size);
691int t3_set_proto_sram(struct adapter *adap, const u8 *data); 690int t3_set_proto_sram(struct adapter *adap, const u8 *data);
692int t3_read_flash(struct adapter *adapter, unsigned int addr,
693 unsigned int nwords, u32 *data, int byte_oriented);
694int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); 691int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
695int t3_get_fw_version(struct adapter *adapter, u32 *vers); 692int t3_get_fw_version(struct adapter *adapter, u32 *vers);
696int t3_check_fw_version(struct adapter *adapter); 693int t3_check_fw_version(struct adapter *adapter);
697int t3_init_hw(struct adapter *adapter, u32 fw_params); 694int t3_init_hw(struct adapter *adapter, u32 fw_params);
698void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
699void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
700int t3_reset_adapter(struct adapter *adapter); 695int t3_reset_adapter(struct adapter *adapter);
701int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, 696int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
702 int reset); 697 int reset);
@@ -706,8 +701,6 @@ void t3_fatal_err(struct adapter *adapter);
706void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); 701void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
707void t3_config_rss(struct adapter *adapter, unsigned int rss_config, 702void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
708 const u8 * cpus, const u16 *rspq); 703 const u8 * cpus, const u16 *rspq);
709int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
710int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
711int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr, 704int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
712 unsigned int n, unsigned int *valp); 705 unsigned int n, unsigned int *valp);
713int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n, 706int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
@@ -731,19 +724,12 @@ void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
731int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters, 724int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
732 unsigned int nroutes); 725 unsigned int nroutes);
733void t3_mc5_intr_handler(struct mc5 *mc5); 726void t3_mc5_intr_handler(struct mc5 *mc5);
734int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
735 u32 *buf);
736 727
737int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
738void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
739void t3_tp_set_offload_mode(struct adapter *adap, int enable); 728void t3_tp_set_offload_mode(struct adapter *adap, int enable);
740void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps); 729void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
741void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS], 730void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
742 unsigned short alpha[NCCTRL_WIN], 731 unsigned short alpha[NCCTRL_WIN],
743 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap); 732 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
744void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
745void t3_get_cong_cntl_tab(struct adapter *adap,
746 unsigned short incr[NMTUS][NCCTRL_WIN]);
747void t3_config_trace_filter(struct adapter *adapter, 733void t3_config_trace_filter(struct adapter *adapter,
748 const struct trace_params *tp, int filter_index, 734 const struct trace_params *tp, int filter_index,
749 int invert, int enable); 735 int invert, int enable);
@@ -769,10 +755,6 @@ int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
769int t3_sge_disable_fl(struct adapter *adapter, unsigned int id); 755int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
770int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id); 756int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
771int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id); 757int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
772int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
773int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
774int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
775int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
776int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op, 758int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
777 unsigned int credits); 759 unsigned int credits);
778 760
diff --git a/drivers/net/cxgb3/cxgb3_defs.h b/drivers/net/cxgb3/cxgb3_defs.h
index 47e53769af5b..920d918ed193 100644
--- a/drivers/net/cxgb3/cxgb3_defs.h
+++ b/drivers/net/cxgb3/cxgb3_defs.h
@@ -43,8 +43,6 @@
43 43
44void *cxgb_alloc_mem(unsigned long size); 44void *cxgb_alloc_mem(unsigned long size);
45void cxgb_free_mem(void *addr); 45void cxgb_free_mem(void *addr);
46void cxgb_neigh_update(struct neighbour *neigh);
47void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
48 46
49/* 47/*
50 * Map an ATID or STID to their entries in the corresponding TID tables. 48 * Map an ATID or STID to their entries in the corresponding TID tables.
@@ -111,7 +109,6 @@ static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
111 return &e->t3c_tid; 109 return &e->t3c_tid;
112} 110}
113 111
114int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
115int attach_t3cdev(struct t3cdev *dev); 112int attach_t3cdev(struct t3cdev *dev);
116void detach_t3cdev(struct t3cdev *dev); 113void detach_t3cdev(struct t3cdev *dev);
117#endif 114#endif
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 1ecf53dafe06..a04ce6a5f637 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1399,7 +1399,10 @@ static int cxgb_open(struct net_device *dev)
1399 "Could not initialize offload capabilities\n"); 1399 "Could not initialize offload capabilities\n");
1400 } 1400 }
1401 1401
1402 dev->real_num_tx_queues = pi->nqsets; 1402 netif_set_real_num_tx_queues(dev, pi->nqsets);
1403 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1404 if (err)
1405 return err;
1403 link_start(dev); 1406 link_start(dev);
1404 t3_port_intr_enable(adapter, pi->port_id); 1407 t3_port_intr_enable(adapter, pi->port_id);
1405 netif_tx_start_all_queues(dev); 1408 netif_tx_start_all_queues(dev);
@@ -2302,6 +2305,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2302 case CHELSIO_GET_QSET_NUM:{ 2305 case CHELSIO_GET_QSET_NUM:{
2303 struct ch_reg edata; 2306 struct ch_reg edata;
2304 2307
2308 memset(&edata, 0, sizeof(struct ch_reg));
2309
2305 edata.cmd = CHELSIO_GET_QSET_NUM; 2310 edata.cmd = CHELSIO_GET_QSET_NUM;
2306 edata.val = pi->nqsets; 2311 edata.val = pi->nqsets;
2307 if (copy_to_user(useraddr, &edata, sizeof(edata))) 2312 if (copy_to_user(useraddr, &edata, sizeof(edata)))
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c6485b39eb0e..bcf07532953d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -60,11 +60,14 @@ static LIST_HEAD(adapter_list);
60static const unsigned int MAX_ATIDS = 64 * 1024; 60static const unsigned int MAX_ATIDS = 64 * 1024;
61static const unsigned int ATID_BASE = 0x10000; 61static const unsigned int ATID_BASE = 0x10000;
62 62
63static void cxgb_neigh_update(struct neighbour *neigh);
64static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
65
63static inline int offload_activated(struct t3cdev *tdev) 66static inline int offload_activated(struct t3cdev *tdev)
64{ 67{
65 const struct adapter *adapter = tdev2adap(tdev); 68 const struct adapter *adapter = tdev2adap(tdev);
66 69
67 return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)); 70 return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
68} 71}
69 72
70/** 73/**
@@ -1015,7 +1018,7 @@ EXPORT_SYMBOL(t3_register_cpl_handler);
1015/* 1018/*
1016 * T3CDEV's receive method. 1019 * T3CDEV's receive method.
1017 */ 1020 */
1018int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n) 1021static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
1019{ 1022{
1020 while (n--) { 1023 while (n--) {
1021 struct sk_buff *skb = *skbs++; 1024 struct sk_buff *skb = *skbs++;
@@ -1070,7 +1073,7 @@ static int is_offloading(struct net_device *dev)
1070 return 0; 1073 return 0;
1071} 1074}
1072 1075
1073void cxgb_neigh_update(struct neighbour *neigh) 1076static void cxgb_neigh_update(struct neighbour *neigh)
1074{ 1077{
1075 struct net_device *dev = neigh->dev; 1078 struct net_device *dev = neigh->dev;
1076 1079
@@ -1104,7 +1107,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1104 tdev->send(tdev, skb); 1107 tdev->send(tdev, skb);
1105} 1108}
1106 1109
1107void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) 1110static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1108{ 1111{
1109 struct net_device *olddev, *newdev; 1112 struct net_device *olddev, *newdev;
1110 struct tid_info *ti; 1113 struct tid_info *ti;
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index 3b5517b8fbde..a8766fb2f9ab 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -374,44 +374,6 @@ int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
374 return err; 374 return err;
375} 375}
376 376
377/*
378 * read_mc5_range - dump a part of the memory managed by MC5
379 * @mc5: the MC5 handle
380 * @start: the start address for the dump
381 * @n: number of 72-bit words to read
382 * @buf: result buffer
383 *
384 * Read n 72-bit words from MC5 memory from the given start location.
385 */
386int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
387 unsigned int n, u32 *buf)
388{
389 u32 read_cmd;
390 int err = 0;
391 struct adapter *adap = mc5->adapter;
392
393 if (mc5->part_type == IDT75P52100)
394 read_cmd = IDT_CMD_READ;
395 else if (mc5->part_type == IDT75N43102)
396 read_cmd = IDT4_CMD_READ;
397 else
398 return -EINVAL;
399
400 mc5_dbgi_mode_enable(mc5);
401
402 while (n--) {
403 t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
404 if (mc5_cmd_write(adap, read_cmd)) {
405 err = -EIO;
406 break;
407 }
408 dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
409 buf += 3;
410 }
411
412 mc5_dbgi_mode_disable(mc5);
413 return 0;
414}
415 377
416#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR) 378#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
417 379
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index c5a142bea5e9..5d72bda54389 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1145,7 +1145,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1145 cpl->len = htonl(skb->len); 1145 cpl->len = htonl(skb->len);
1146 cntrl = V_TXPKT_INTF(pi->port_id); 1146 cntrl = V_TXPKT_INTF(pi->port_id);
1147 1147
1148 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1148 if (vlan_tx_tag_present(skb))
1149 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb)); 1149 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(vlan_tx_tag_get(skb));
1150 1150
1151 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size); 1151 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
@@ -1279,7 +1279,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1279 qs->port_stats[SGE_PSTAT_TX_CSUM]++; 1279 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1280 if (skb_shinfo(skb)->gso_size) 1280 if (skb_shinfo(skb)->gso_size)
1281 qs->port_stats[SGE_PSTAT_TSO]++; 1281 qs->port_stats[SGE_PSTAT_TSO]++;
1282 if (vlan_tx_tag_present(skb) && pi->vlan_grp) 1282 if (vlan_tx_tag_present(skb))
1283 qs->port_stats[SGE_PSTAT_VLANINS]++; 1283 qs->port_stats[SGE_PSTAT_VLANINS]++;
1284 1284
1285 /* 1285 /*
@@ -2554,7 +2554,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2554 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case 2554 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2555 * (i.e., response queue serviced in hard interrupt). 2555 * (i.e., response queue serviced in hard interrupt).
2556 */ 2556 */
2557irqreturn_t t3_sge_intr_msix(int irq, void *cookie) 2557static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2558{ 2558{
2559 struct sge_qset *qs = cookie; 2559 struct sge_qset *qs = cookie;
2560 struct adapter *adap = qs->adap; 2560 struct adapter *adap = qs->adap;
@@ -3320,40 +3320,3 @@ void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3320 3320
3321 spin_lock_init(&adap->sge.reg_lock); 3321 spin_lock_init(&adap->sge.reg_lock);
3322} 3322}
3323
3324/**
3325 * t3_get_desc - dump an SGE descriptor for debugging purposes
3326 * @qs: the queue set
3327 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3328 * @idx: the descriptor index in the queue
3329 * @data: where to dump the descriptor contents
3330 *
3331 * Dumps the contents of a HW descriptor of an SGE queue. Returns the
3332 * size of the descriptor.
3333 */
3334int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3335 unsigned char *data)
3336{
3337 if (qnum >= 6)
3338 return -EINVAL;
3339
3340 if (qnum < 3) {
3341 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3342 return -EINVAL;
3343 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3344 return sizeof(struct tx_desc);
3345 }
3346
3347 if (qnum == 3) {
3348 if (!qs->rspq.desc || idx >= qs->rspq.size)
3349 return -EINVAL;
3350 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3351 return sizeof(struct rsp_desc);
3352 }
3353
3354 qnum -= 4;
3355 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3356 return -EINVAL;
3357 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3358 return sizeof(struct rx_desc);
3359}
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 421d5589cecd..3a6adf0b3e9d 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -34,6 +34,8 @@
34#include "sge_defs.h" 34#include "sge_defs.h"
35#include "firmware_exports.h" 35#include "firmware_exports.h"
36 36
37static void t3_port_intr_clear(struct adapter *adapter, int idx);
38
37/** 39/**
38 * t3_wait_op_done_val - wait until an operation is completed 40 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation 41 * @adapter: the adapter performing the operation
@@ -840,8 +842,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
840 * (i.e., big-endian), otherwise as 32-bit words in the platform's 842 * (i.e., big-endian), otherwise as 32-bit words in the platform's
841 * natural endianess. 843 * natural endianess.
842 */ 844 */
843int t3_read_flash(struct adapter *adapter, unsigned int addr, 845static int t3_read_flash(struct adapter *adapter, unsigned int addr,
844 unsigned int nwords, u32 *data, int byte_oriented) 846 unsigned int nwords, u32 *data, int byte_oriented)
845{ 847{
846 int ret; 848 int ret;
847 849
@@ -2111,7 +2113,7 @@ void t3_port_intr_disable(struct adapter *adapter, int idx)
2111 * Clear port-specific (i.e., MAC and PHY) interrupts for the given 2113 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2112 * adapter port. 2114 * adapter port.
2113 */ 2115 */
2114void t3_port_intr_clear(struct adapter *adapter, int idx) 2116static void t3_port_intr_clear(struct adapter *adapter, int idx)
2115{ 2117{
2116 struct cphy *phy = &adap2pinfo(adapter, idx)->phy; 2118 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2117 2119
@@ -2484,98 +2486,6 @@ int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2484} 2486}
2485 2487
2486/** 2488/**
2487 * t3_sge_read_context - read an SGE context
2488 * @type: the context type
2489 * @adapter: the adapter
2490 * @id: the context id
2491 * @data: holds the retrieved context
2492 *
2493 * Read an SGE egress context. The caller is responsible for ensuring
2494 * only one context operation occurs at a time.
2495 */
2496static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2497 unsigned int id, u32 data[4])
2498{
2499 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2500 return -EBUSY;
2501
2502 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2503 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2504 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2505 SG_CONTEXT_CMD_ATTEMPTS, 1))
2506 return -EIO;
2507 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2508 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2509 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2510 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2511 return 0;
2512}
2513
2514/**
2515 * t3_sge_read_ecntxt - read an SGE egress context
2516 * @adapter: the adapter
2517 * @id: the context id
2518 * @data: holds the retrieved context
2519 *
2520 * Read an SGE egress context. The caller is responsible for ensuring
2521 * only one context operation occurs at a time.
2522 */
2523int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2524{
2525 if (id >= 65536)
2526 return -EINVAL;
2527 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2528}
2529
2530/**
2531 * t3_sge_read_cq - read an SGE CQ context
2532 * @adapter: the adapter
2533 * @id: the context id
2534 * @data: holds the retrieved context
2535 *
2536 * Read an SGE CQ context. The caller is responsible for ensuring
2537 * only one context operation occurs at a time.
2538 */
2539int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2540{
2541 if (id >= 65536)
2542 return -EINVAL;
2543 return t3_sge_read_context(F_CQ, adapter, id, data);
2544}
2545
2546/**
2547 * t3_sge_read_fl - read an SGE free-list context
2548 * @adapter: the adapter
2549 * @id: the context id
2550 * @data: holds the retrieved context
2551 *
2552 * Read an SGE free-list context. The caller is responsible for ensuring
2553 * only one context operation occurs at a time.
2554 */
2555int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2556{
2557 if (id >= SGE_QSETS * 2)
2558 return -EINVAL;
2559 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2560}
2561
2562/**
2563 * t3_sge_read_rspq - read an SGE response queue context
2564 * @adapter: the adapter
2565 * @id: the context id
2566 * @data: holds the retrieved context
2567 *
2568 * Read an SGE response queue context. The caller is responsible for
2569 * ensuring only one context operation occurs at a time.
2570 */
2571int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2572{
2573 if (id >= SGE_QSETS)
2574 return -EINVAL;
2575 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2576}
2577
2578/**
2579 * t3_config_rss - configure Rx packet steering 2489 * t3_config_rss - configure Rx packet steering
2580 * @adapter: the adapter 2490 * @adapter: the adapter
2581 * @rss_config: RSS settings (written to TP_RSS_CONFIG) 2491 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
@@ -2616,42 +2526,6 @@ void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2616} 2526}
2617 2527
2618/** 2528/**
2619 * t3_read_rss - read the contents of the RSS tables
2620 * @adapter: the adapter
2621 * @lkup: holds the contents of the RSS lookup table
2622 * @map: holds the contents of the RSS map table
2623 *
2624 * Reads the contents of the receive packet steering tables.
2625 */
2626int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2627{
2628 int i;
2629 u32 val;
2630
2631 if (lkup)
2632 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2633 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2634 0xffff0000 | i);
2635 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2636 if (!(val & 0x80000000))
2637 return -EAGAIN;
2638 *lkup++ = val;
2639 *lkup++ = (val >> 8);
2640 }
2641
2642 if (map)
2643 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2644 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2645 0xffff0000 | i);
2646 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2647 if (!(val & 0x80000000))
2648 return -EAGAIN;
2649 *map++ = val;
2650 }
2651 return 0;
2652}
2653
2654/**
2655 * t3_tp_set_offload_mode - put TP in NIC/offload mode 2529 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2656 * @adap: the adapter 2530 * @adap: the adapter
2657 * @enable: 1 to select offload mode, 0 for regular NIC 2531 * @enable: 1 to select offload mode, 0 for regular NIC
@@ -2868,7 +2742,8 @@ static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2868 * 2742 *
2869 * Set the receive coalescing size and PSH bit handling. 2743 * Set the receive coalescing size and PSH bit handling.
2870 */ 2744 */
2871int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh) 2745static int t3_tp_set_coalescing_size(struct adapter *adap,
2746 unsigned int size, int psh)
2872{ 2747{
2873 u32 val; 2748 u32 val;
2874 2749
@@ -2898,7 +2773,7 @@ int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2898 * Set TP's max receive size. This is the limit that applies when 2773 * Set TP's max receive size. This is the limit that applies when
2899 * receive coalescing is disabled. 2774 * receive coalescing is disabled.
2900 */ 2775 */
2901void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size) 2776static void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2902{ 2777{
2903 t3_write_reg(adap, A_TP_PARA_REG7, 2778 t3_write_reg(adap, A_TP_PARA_REG7,
2904 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); 2779 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
@@ -3018,48 +2893,6 @@ void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
3018} 2893}
3019 2894
3020/** 2895/**
3021 * t3_read_hw_mtus - returns the values in the HW MTU table
3022 * @adap: the adapter
3023 * @mtus: where to store the HW MTU values
3024 *
3025 * Reads the HW MTU table.
3026 */
3027void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3028{
3029 int i;
3030
3031 for (i = 0; i < NMTUS; ++i) {
3032 unsigned int val;
3033
3034 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3035 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3036 mtus[i] = val & 0x3fff;
3037 }
3038}
3039
3040/**
3041 * t3_get_cong_cntl_tab - reads the congestion control table
3042 * @adap: the adapter
3043 * @incr: where to store the alpha values
3044 *
3045 * Reads the additive increments programmed into the HW congestion
3046 * control table.
3047 */
3048void t3_get_cong_cntl_tab(struct adapter *adap,
3049 unsigned short incr[NMTUS][NCCTRL_WIN])
3050{
3051 unsigned int mtu, w;
3052
3053 for (mtu = 0; mtu < NMTUS; ++mtu)
3054 for (w = 0; w < NCCTRL_WIN; ++w) {
3055 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3056 0xffff0000 | (mtu << 5) | w);
3057 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3058 0x1fff;
3059 }
3060}
3061
3062/**
3063 * t3_tp_get_mib_stats - read TP's MIB counters 2896 * t3_tp_get_mib_stats - read TP's MIB counters
3064 * @adap: the adapter 2897 * @adap: the adapter
3065 * @tps: holds the returned counter values 2898 * @tps: holds the returned counter values
@@ -3223,15 +3056,6 @@ static int tp_init(struct adapter *adap, const struct tp_params *p)
3223 return busy; 3056 return busy;
3224} 3057}
3225 3058
3226int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3227{
3228 if (port_mask & ~((1 << adap->params.nports) - 1))
3229 return -EINVAL;
3230 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3231 port_mask << S_PORT0ACTIVE);
3232 return 0;
3233}
3234
3235/* 3059/*
3236 * Perform the bits of HW initialization that are dependent on the Tx 3060 * Perform the bits of HW initialization that are dependent on the Tx
3237 * channels being used. 3061 * channels being used.
@@ -3687,7 +3511,7 @@ static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3687 mc7->width = G_WIDTH(cfg); 3511 mc7->width = G_WIDTH(cfg);
3688} 3512}
3689 3513
3690void mac_prep(struct cmac *mac, struct adapter *adapter, int index) 3514static void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3691{ 3515{
3692 u16 devid; 3516 u16 devid;
3693 3517
@@ -3707,7 +3531,8 @@ void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3707 } 3531 }
3708} 3532}
3709 3533
3710void early_hw_init(struct adapter *adapter, const struct adapter_info *ai) 3534static void early_hw_init(struct adapter *adapter,
3535 const struct adapter_info *ai)
3711{ 3536{
3712 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2); 3537 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3713 3538
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3ece9f5069fa..eaa49e4119f1 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -592,7 +592,6 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id);
592void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); 592void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
593 593
594void *t4_alloc_mem(size_t size); 594void *t4_alloc_mem(size_t size);
595void t4_free_mem(void *addr);
596 595
597void t4_free_sge_resources(struct adapter *adap); 596void t4_free_sge_resources(struct adapter *adap);
598irq_handler_t t4_intr_handler(struct adapter *adap); 597irq_handler_t t4_intr_handler(struct adapter *adap);
@@ -651,7 +650,6 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
651 650
652void t4_intr_enable(struct adapter *adapter); 651void t4_intr_enable(struct adapter *adapter);
653void t4_intr_disable(struct adapter *adapter); 652void t4_intr_disable(struct adapter *adapter);
654void t4_intr_clear(struct adapter *adapter);
655int t4_slow_intr_handler(struct adapter *adapter); 653int t4_slow_intr_handler(struct adapter *adapter);
656 654
657int t4_wait_dev_ready(struct adapter *adap); 655int t4_wait_dev_ready(struct adapter *adap);
@@ -664,24 +662,16 @@ int t4_check_fw_version(struct adapter *adapter);
664int t4_prep_adapter(struct adapter *adapter); 662int t4_prep_adapter(struct adapter *adapter);
665int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 663int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
666void t4_fatal_err(struct adapter *adapter); 664void t4_fatal_err(struct adapter *adapter);
667int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
668 int filter_index, int enable);
669void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
670 int filter_index, int *enabled);
671int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 665int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
672 int start, int n, const u16 *rspq, unsigned int nrspq); 666 int start, int n, const u16 *rspq, unsigned int nrspq);
673int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 667int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
674 unsigned int flags); 668 unsigned int flags);
675int t4_read_rss(struct adapter *adapter, u16 *entries);
676int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); 669int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
677int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 670int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
678 u64 *parity); 671 u64 *parity);
679 672
680void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 673void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
681void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
682
683void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 674void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
684void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
685void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 675void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
686 struct tp_tcp_stats *v6); 676 struct tp_tcp_stats *v6);
687void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 677void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
@@ -711,8 +701,6 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
711int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 701int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
712 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 702 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
713 unsigned int *rss_size); 703 unsigned int *rss_size);
714int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
715 unsigned int vf, unsigned int viid);
716int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 704int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
717 int mtu, int promisc, int all_multi, int bcast, int vlanex, 705 int mtu, int promisc, int all_multi, int bcast, int vlanex,
718 bool sleep_ok); 706 bool sleep_ok);
@@ -731,9 +719,6 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
731 unsigned int mmd, unsigned int reg, u16 *valp); 719 unsigned int mmd, unsigned int reg, u16 *valp);
732int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 720int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
733 unsigned int mmd, unsigned int reg, u16 val); 721 unsigned int mmd, unsigned int reg, u16 val);
734int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
735 unsigned int pf, unsigned int vf, unsigned int iqid,
736 unsigned int fl0id, unsigned int fl1id);
737int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 722int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
738 unsigned int vf, unsigned int iqtype, unsigned int iqid, 723 unsigned int vf, unsigned int iqtype, unsigned int iqid,
739 unsigned int fl0id, unsigned int fl1id); 724 unsigned int fl0id, unsigned int fl1id);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 75b9401fd484..930bd075a43e 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -880,7 +880,7 @@ void *t4_alloc_mem(size_t size)
880/* 880/*
881 * Free memory allocated through alloc_mem(). 881 * Free memory allocated through alloc_mem().
882 */ 882 */
883void t4_free_mem(void *addr) 883static void t4_free_mem(void *addr)
884{ 884{
885 if (is_vmalloc_addr(addr)) 885 if (is_vmalloc_addr(addr))
886 vfree(addr); 886 vfree(addr);
@@ -2206,8 +2206,8 @@ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2206 * Queue a TID release request and if necessary schedule a work queue to 2206 * Queue a TID release request and if necessary schedule a work queue to
2207 * process it. 2207 * process it.
2208 */ 2208 */
2209void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, 2209static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2210 unsigned int tid) 2210 unsigned int tid)
2211{ 2211{
2212 void **p = &t->tid_tab[tid]; 2212 void **p = &t->tid_tab[tid];
2213 struct adapter *adap = container_of(t, struct adapter, tids); 2213 struct adapter *adap = container_of(t, struct adapter, tids);
@@ -2222,7 +2222,6 @@ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2222 } 2222 }
2223 spin_unlock_bh(&adap->tid_release_lock); 2223 spin_unlock_bh(&adap->tid_release_lock);
2224} 2224}
2225EXPORT_SYMBOL(cxgb4_queue_tid_release);
2226 2225
2227/* 2226/*
2228 * Process the list of pending TID release requests. 2227 * Process the list of pending TID release requests.
@@ -2355,48 +2354,6 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2355EXPORT_SYMBOL(cxgb4_create_server); 2354EXPORT_SYMBOL(cxgb4_create_server);
2356 2355
2357/** 2356/**
2358 * cxgb4_create_server6 - create an IPv6 server
2359 * @dev: the device
2360 * @stid: the server TID
2361 * @sip: local IPv6 address to bind server to
2362 * @sport: the server's TCP port
2363 * @queue: queue to direct messages from this server to
2364 *
2365 * Create an IPv6 server for the given port and address.
2366 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2367 */
2368int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
2369 const struct in6_addr *sip, __be16 sport,
2370 unsigned int queue)
2371{
2372 unsigned int chan;
2373 struct sk_buff *skb;
2374 struct adapter *adap;
2375 struct cpl_pass_open_req6 *req;
2376
2377 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2378 if (!skb)
2379 return -ENOMEM;
2380
2381 adap = netdev2adap(dev);
2382 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
2383 INIT_TP_WR(req, 0);
2384 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
2385 req->local_port = sport;
2386 req->peer_port = htons(0);
2387 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
2388 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
2389 req->peer_ip_hi = cpu_to_be64(0);
2390 req->peer_ip_lo = cpu_to_be64(0);
2391 chan = rxq_to_chan(&adap->sge, queue);
2392 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2393 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2394 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2395 return t4_mgmt_tx(adap, skb);
2396}
2397EXPORT_SYMBOL(cxgb4_create_server6);
2398
2399/**
2400 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU 2357 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2401 * @mtus: the HW MTU table 2358 * @mtus: the HW MTU table
2402 * @mtu: the target MTU 2359 * @mtu: the target MTU
@@ -2455,25 +2412,6 @@ unsigned int cxgb4_port_idx(const struct net_device *dev)
2455} 2412}
2456EXPORT_SYMBOL(cxgb4_port_idx); 2413EXPORT_SYMBOL(cxgb4_port_idx);
2457 2414
2458/**
2459 * cxgb4_netdev_by_hwid - return the net device of a HW port
2460 * @pdev: identifies the adapter
2461 * @id: the HW port id
2462 *
2463 * Return the net device associated with the interface with the given HW
2464 * id.
2465 */
2466struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
2467{
2468 const struct adapter *adap = pci_get_drvdata(pdev);
2469
2470 if (!adap || id >= NCHAN)
2471 return NULL;
2472 id = adap->chan_map[id];
2473 return id < MAX_NPORTS ? adap->port[id] : NULL;
2474}
2475EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
2476
2477void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, 2415void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2478 struct tp_tcp_stats *v6) 2416 struct tp_tcp_stats *v6)
2479{ 2417{
@@ -2763,7 +2701,10 @@ static int cxgb_open(struct net_device *dev)
2763 return err; 2701 return err;
2764 } 2702 }
2765 2703
2766 dev->real_num_tx_queues = pi->nqsets; 2704 netif_set_real_num_tx_queues(dev, pi->nqsets);
2705 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
2706 if (err)
2707 return err;
2767 err = link_start(dev); 2708 err = link_start(dev);
2768 if (!err) 2709 if (!err)
2769 netif_tx_start_all_queues(dev); 2710 netif_tx_start_all_queues(dev);
@@ -3860,7 +3801,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
3860 pci_disable_device(pdev); 3801 pci_disable_device(pdev);
3861 pci_release_regions(pdev); 3802 pci_release_regions(pdev);
3862 pci_set_drvdata(pdev, NULL); 3803 pci_set_drvdata(pdev, NULL);
3863 } else if (PCI_FUNC(pdev->devfn) > 0) 3804 } else
3864 pci_release_regions(pdev); 3805 pci_release_regions(pdev);
3865} 3806}
3866 3807
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
index 85d74e751ce0..1b48c0170145 100644
--- a/drivers/net/cxgb4/cxgb4_uld.h
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -139,16 +139,11 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
139void cxgb4_free_atid(struct tid_info *t, unsigned int atid); 139void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
140void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); 140void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
141void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); 141void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
142void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
143 unsigned int tid);
144 142
145struct in6_addr; 143struct in6_addr;
146 144
147int cxgb4_create_server(const struct net_device *dev, unsigned int stid, 145int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
148 __be32 sip, __be16 sport, unsigned int queue); 146 __be32 sip, __be16 sport, unsigned int queue);
149int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
150 const struct in6_addr *sip, __be16 sport,
151 unsigned int queue);
152 147
153static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) 148static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
154{ 149{
@@ -233,7 +228,6 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
233unsigned int cxgb4_port_chan(const struct net_device *dev); 228unsigned int cxgb4_port_chan(const struct net_device *dev);
234unsigned int cxgb4_port_viid(const struct net_device *dev); 229unsigned int cxgb4_port_viid(const struct net_device *dev);
235unsigned int cxgb4_port_idx(const struct net_device *dev); 230unsigned int cxgb4_port_idx(const struct net_device *dev);
236struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
237unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, 231unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
238 unsigned int *idx); 232 unsigned int *idx);
239void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, 233void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
index e8f0f55e9d08..a2d323c473f8 100644
--- a/drivers/net/cxgb4/l2t.c
+++ b/drivers/net/cxgb4/l2t.c
@@ -481,40 +481,6 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
481 handle_failed_resolution(adap, arpq); 481 handle_failed_resolution(adap, arpq);
482} 482}
483 483
484/*
485 * Allocate an L2T entry for use by a switching rule. Such entries need to be
486 * explicitly freed and while busy they are not on any hash chain, so normal
487 * address resolution updates do not see them.
488 */
489struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
490{
491 struct l2t_entry *e;
492
493 write_lock_bh(&d->lock);
494 e = alloc_l2e(d);
495 if (e) {
496 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
497 e->state = L2T_STATE_SWITCHING;
498 atomic_set(&e->refcnt, 1);
499 spin_unlock(&e->lock);
500 }
501 write_unlock_bh(&d->lock);
502 return e;
503}
504
505/*
506 * Sets/updates the contents of a switching L2T entry that has been allocated
507 * with an earlier call to @t4_l2t_alloc_switching.
508 */
509int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
510 u8 port, u8 *eth_addr)
511{
512 e->vlan = vlan;
513 e->lport = port;
514 memcpy(e->dmac, eth_addr, ETH_ALEN);
515 return write_l2e(adap, e, 0);
516}
517
518struct l2t_data *t4_init_l2t(void) 484struct l2t_data *t4_init_l2t(void)
519{ 485{
520 int i; 486 int i;
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
index 643f27ed3cf4..7bd8f42378ff 100644
--- a/drivers/net/cxgb4/l2t.h
+++ b/drivers/net/cxgb4/l2t.h
@@ -100,9 +100,6 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
100 unsigned int priority); 100 unsigned int priority);
101 101
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
105 u8 port, u8 *eth_addr);
106struct l2t_data *t4_init_l2t(void); 103struct l2t_data *t4_init_l2t(void);
107void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); 104void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
108 105
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index 9e1a4b49b47a..bb813d94aea8 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -120,30 +120,6 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 } 120 }
121} 121}
122 122
123#if 0
124/**
125 * t4_write_indirect - write indirectly addressed registers
126 * @adap: the adapter
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
132 *
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
135 */
136static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
139{
140 while (nregs--) {
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
143 }
144}
145#endif
146
147/* 123/*
148 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 124 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
149 */ 125 */
@@ -1560,44 +1536,6 @@ void t4_intr_disable(struct adapter *adapter)
1560} 1536}
1561 1537
1562/** 1538/**
1563 * t4_intr_clear - clear all interrupts
1564 * @adapter: the adapter whose interrupts should be cleared
1565 *
1566 * Clears all interrupts. The caller must be a PCI function managing
1567 * global interrupts.
1568 */
1569void t4_intr_clear(struct adapter *adapter)
1570{
1571 static const unsigned int cause_reg[] = {
1572 SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
1573 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1574 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1575 PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
1576 MC_INT_CAUSE,
1577 MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
1578 EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
1579 CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
1580 MYPF_REG(CIM_PF_HOST_INT_CAUSE),
1581 TP_INT_CAUSE,
1582 ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
1583 PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
1584 MPS_RX_PERR_INT_CAUSE,
1585 CPL_INTR_CAUSE,
1586 MYPF_REG(PL_PF_INT_CAUSE),
1587 PL_PL_INT_CAUSE,
1588 LE_DB_INT_CAUSE,
1589 };
1590
1591 unsigned int i;
1592
1593 for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
1594 t4_write_reg(adapter, cause_reg[i], 0xffffffff);
1595
1596 t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
1597 (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
1598}
1599
1600/**
1601 * hash_mac_addr - return the hash value of a MAC address 1539 * hash_mac_addr - return the hash value of a MAC address
1602 * @addr: the 48-bit Ethernet MAC address 1540 * @addr: the 48-bit Ethernet MAC address
1603 * 1541 *
@@ -1709,36 +1647,6 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1709 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); 1647 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1710} 1648}
1711 1649
1712/* Read an RSS table row */
1713static int rd_rss_row(struct adapter *adap, int row, u32 *val)
1714{
1715 t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
1716 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
1717 5, 0, val);
1718}
1719
1720/**
1721 * t4_read_rss - read the contents of the RSS mapping table
1722 * @adapter: the adapter
1723 * @map: holds the contents of the RSS mapping table
1724 *
1725 * Reads the contents of the RSS hash->queue mapping table.
1726 */
1727int t4_read_rss(struct adapter *adapter, u16 *map)
1728{
1729 u32 val;
1730 int i, ret;
1731
1732 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
1733 ret = rd_rss_row(adapter, i, &val);
1734 if (ret)
1735 return ret;
1736 *map++ = LKPTBLQUEUE0_GET(val);
1737 *map++ = LKPTBLQUEUE1_GET(val);
1738 }
1739 return 0;
1740}
1741
1742/** 1650/**
1743 * t4_tp_get_tcp_stats - read TP's TCP MIB counters 1651 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
1744 * @adap: the adapter 1652 * @adap: the adapter
@@ -1779,29 +1687,6 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1779} 1687}
1780 1688
1781/** 1689/**
1782 * t4_tp_get_err_stats - read TP's error MIB counters
1783 * @adap: the adapter
1784 * @st: holds the counter values
1785 *
1786 * Returns the values of TP's error counters.
1787 */
1788void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
1789{
1790 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
1791 12, TP_MIB_MAC_IN_ERR_0);
1792 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
1793 8, TP_MIB_TNL_CNG_DROP_0);
1794 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
1795 4, TP_MIB_TNL_DROP_0);
1796 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
1797 4, TP_MIB_OFD_VLN_DROP_0);
1798 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
1799 4, TP_MIB_TCP_V6IN_ERR_0);
1800 t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
1801 2, TP_MIB_OFD_ARP_DROP);
1802}
1803
1804/**
1805 * t4_read_mtu_tbl - returns the values in the HW path MTU table 1690 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1806 * @adap: the adapter 1691 * @adap: the adapter
1807 * @mtus: where to store the MTU values 1692 * @mtus: where to store the MTU values
@@ -1916,122 +1801,6 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1916} 1801}
1917 1802
1918/** 1803/**
1919 * t4_set_trace_filter - configure one of the tracing filters
1920 * @adap: the adapter
1921 * @tp: the desired trace filter parameters
1922 * @idx: which filter to configure
1923 * @enable: whether to enable or disable the filter
1924 *
1925 * Configures one of the tracing filters available in HW. If @enable is
1926 * %0 @tp is not examined and may be %NULL.
1927 */
1928int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
1929 int idx, int enable)
1930{
1931 int i, ofst = idx * 4;
1932 u32 data_reg, mask_reg, cfg;
1933 u32 multitrc = TRCMULTIFILTER;
1934
1935 if (!enable) {
1936 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1937 goto out;
1938 }
1939
1940 if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
1941 tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
1942 tp->snap_len > 9600 || (idx && tp->snap_len > 256))
1943 return -EINVAL;
1944
1945 if (tp->snap_len > 256) { /* must be tracer 0 */
1946 if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
1947 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
1948 t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
1949 return -EINVAL; /* other tracers are enabled */
1950 multitrc = 0;
1951 } else if (idx) {
1952 i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
1953 if (TFCAPTUREMAX_GET(i) > 256 &&
1954 (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
1955 return -EINVAL;
1956 }
1957
1958 /* stop the tracer we'll be changing */
1959 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
1960
1961 /* disable tracing globally if running in the wrong single/multi mode */
1962 cfg = t4_read_reg(adap, MPS_TRC_CFG);
1963 if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
1964 t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
1965 t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1966 msleep(1);
1967 if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
1968 return -ETIMEDOUT;
1969 }
1970 /*
1971 * At this point either the tracing is enabled and in the right mode or
1972 * disabled.
1973 */
1974
1975 idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
1976 data_reg = MPS_TRC_FILTER0_MATCH + idx;
1977 mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
1978
1979 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
1980 t4_write_reg(adap, data_reg, tp->data[i]);
1981 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
1982 }
1983 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
1984 TFCAPTUREMAX(tp->snap_len) |
1985 TFMINPKTSIZE(tp->min_len));
1986 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
1987 TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
1988 TFPORT(tp->port) | TFEN |
1989 (tp->invert ? TFINVERTMATCH : 0));
1990
1991 cfg &= ~TRCMULTIFILTER;
1992 t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
1993out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */
1994 return 0;
1995}
1996
1997/**
1998 * t4_get_trace_filter - query one of the tracing filters
1999 * @adap: the adapter
2000 * @tp: the current trace filter parameters
2001 * @idx: which trace filter to query
2002 * @enabled: non-zero if the filter is enabled
2003 *
2004 * Returns the current settings of one of the HW tracing filters.
2005 */
2006void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
2007 int *enabled)
2008{
2009 u32 ctla, ctlb;
2010 int i, ofst = idx * 4;
2011 u32 data_reg, mask_reg;
2012
2013 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
2014 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
2015
2016 *enabled = !!(ctla & TFEN);
2017 tp->snap_len = TFCAPTUREMAX_GET(ctlb);
2018 tp->min_len = TFMINPKTSIZE_GET(ctlb);
2019 tp->skip_ofst = TFOFFSET_GET(ctla);
2020 tp->skip_len = TFLENGTH_GET(ctla);
2021 tp->invert = !!(ctla & TFINVERTMATCH);
2022 tp->port = TFPORT_GET(ctla);
2023
2024 ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
2025 data_reg = MPS_TRC_FILTER0_MATCH + ofst;
2026 mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
2027
2028 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
2029 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
2030 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
2031 }
2032}
2033
2034/**
2035 * get_mps_bg_map - return the buffer groups associated with a port 1804 * get_mps_bg_map - return the buffer groups associated with a port
2036 * @adap: the adapter 1805 * @adap: the adapter
2037 * @idx: the port index 1806 * @idx: the port index
@@ -2133,52 +1902,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2133} 1902}
2134 1903
2135/** 1904/**
2136 * t4_get_lb_stats - collect loopback port statistics
2137 * @adap: the adapter
2138 * @idx: the loopback port index
2139 * @p: the stats structure to fill
2140 *
2141 * Return HW statistics for the given loopback port.
2142 */
2143void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
2144{
2145 u32 bgmap = get_mps_bg_map(adap, idx);
2146
2147#define GET_STAT(name) \
2148 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
2149#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2150
2151 p->octets = GET_STAT(BYTES);
2152 p->frames = GET_STAT(FRAMES);
2153 p->bcast_frames = GET_STAT(BCAST);
2154 p->mcast_frames = GET_STAT(MCAST);
2155 p->ucast_frames = GET_STAT(UCAST);
2156 p->error_frames = GET_STAT(ERROR);
2157
2158 p->frames_64 = GET_STAT(64B);
2159 p->frames_65_127 = GET_STAT(65B_127B);
2160 p->frames_128_255 = GET_STAT(128B_255B);
2161 p->frames_256_511 = GET_STAT(256B_511B);
2162 p->frames_512_1023 = GET_STAT(512B_1023B);
2163 p->frames_1024_1518 = GET_STAT(1024B_1518B);
2164 p->frames_1519_max = GET_STAT(1519B_MAX);
2165 p->drop = t4_read_reg(adap, PORT_REG(idx,
2166 MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
2167
2168 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
2169 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
2170 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
2171 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
2172 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
2173 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
2174 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
2175 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
2176
2177#undef GET_STAT
2178#undef GET_STAT_COM
2179}
2180
2181/**
2182 * t4_wol_magic_enable - enable/disable magic packet WoL 1905 * t4_wol_magic_enable - enable/disable magic packet WoL
2183 * @adap: the adapter 1906 * @adap: the adapter
2184 * @port: the physical port index 1907 * @port: the physical port index
@@ -2584,30 +2307,6 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2584} 2307}
2585 2308
2586/** 2309/**
2587 * t4_free_vi - free a virtual interface
2588 * @adap: the adapter
2589 * @mbox: mailbox to use for the FW command
2590 * @pf: the PF owning the VI
2591 * @vf: the VF owning the VI
2592 * @viid: virtual interface identifiler
2593 *
2594 * Free a previously allocated virtual interface.
2595 */
2596int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2597 unsigned int vf, unsigned int viid)
2598{
2599 struct fw_vi_cmd c;
2600
2601 memset(&c, 0, sizeof(c));
2602 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2603 FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
2604 FW_VI_CMD_VFN(vf));
2605 c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
2606 c.type_viid = htons(FW_VI_CMD_VIID(viid));
2607 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2608}
2609
2610/**
2611 * t4_set_rxmode - set Rx properties of a virtual interface 2310 * t4_set_rxmode - set Rx properties of a virtual interface
2612 * @adap: the adapter 2311 * @adap: the adapter
2613 * @mbox: mailbox to use for the FW command 2312 * @mbox: mailbox to use for the FW command
@@ -2833,37 +2532,6 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2833} 2532}
2834 2533
2835/** 2534/**
2836 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2837 * @adap: the adapter
2838 * @mbox: mailbox to use for the FW command
2839 * @start: %true to enable the queues, %false to disable them
2840 * @pf: the PF owning the queues
2841 * @vf: the VF owning the queues
2842 * @iqid: ingress queue id
2843 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2844 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2845 *
2846 * Starts or stops an ingress queue and its associated FLs, if any.
2847 */
2848int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2849 unsigned int pf, unsigned int vf, unsigned int iqid,
2850 unsigned int fl0id, unsigned int fl1id)
2851{
2852 struct fw_iq_cmd c;
2853
2854 memset(&c, 0, sizeof(c));
2855 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2856 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2857 FW_IQ_CMD_VFN(vf));
2858 c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
2859 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
2860 c.iqid = htons(iqid);
2861 c.fl0id = htons(fl0id);
2862 c.fl1id = htons(fl1id);
2863 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2864}
2865
2866/**
2867 * t4_iq_free - free an ingress queue and its FLs 2535 * t4_iq_free - free an ingress queue and its FLs
2868 * @adap: the adapter 2536 * @adap: the adapter
2869 * @mbox: mailbox to use for the FW command 2537 * @mbox: mailbox to use for the FW command
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 7b6d07f50c71..555ecc5a2e93 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -748,7 +748,10 @@ static int cxgb4vf_open(struct net_device *dev)
748 /* 748 /*
749 * Note that this interface is up and start everything up ... 749 * Note that this interface is up and start everything up ...
750 */ 750 */
751 dev->real_num_tx_queues = pi->nqsets; 751 netif_set_real_num_tx_queues(dev, pi->nqsets);
752 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
753 if (err)
754 return err;
752 set_bit(pi->port_id, &adapter->open_device_map); 755 set_bit(pi->port_id, &adapter->open_device_map);
753 link_start(dev); 756 link_start(dev);
754 netif_tx_start_all_queues(dev); 757 netif_tx_start_all_queues(dev);
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 5c7bde7f9bae..873cb7d86c57 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -132,15 +132,15 @@ struct rss_params {
132 unsigned int mode; /* RSS mode */ 132 unsigned int mode; /* RSS mode */
133 union { 133 union {
134 struct { 134 struct {
135 int synmapen:1; /* SYN Map Enable */ 135 unsigned int synmapen:1; /* SYN Map Enable */
136 int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */ 136 unsigned int syn4tupenipv6:1; /* enable hashing 4-tuple IPv6 SYNs */
137 int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */ 137 unsigned int syn2tupenipv6:1; /* enable hashing 2-tuple IPv6 SYNs */
138 int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */ 138 unsigned int syn4tupenipv4:1; /* enable hashing 4-tuple IPv4 SYNs */
139 int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */ 139 unsigned int syn2tupenipv4:1; /* enable hashing 2-tuple IPv4 SYNs */
140 int ofdmapen:1; /* Offload Map Enable */ 140 unsigned int ofdmapen:1; /* Offload Map Enable */
141 int tnlmapen:1; /* Tunnel Map Enable */ 141 unsigned int tnlmapen:1; /* Tunnel Map Enable */
142 int tnlalllookup:1; /* Tunnel All Lookup */ 142 unsigned int tnlalllookup:1; /* Tunnel All Lookup */
143 int hashtoeplitz:1; /* use Toeplitz hash */ 143 unsigned int hashtoeplitz:1; /* use Toeplitz hash */
144 } basicvirtual; 144 } basicvirtual;
145 } u; 145 } u;
146}; 146};
@@ -151,10 +151,10 @@ struct rss_params {
151union rss_vi_config { 151union rss_vi_config {
152 struct { 152 struct {
153 u16 defaultq; /* Ingress Queue ID for !tnlalllookup */ 153 u16 defaultq; /* Ingress Queue ID for !tnlalllookup */
154 int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */ 154 unsigned int ip6fourtupen:1; /* hash 4-tuple IPv6 ingress packets */
155 int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */ 155 unsigned int ip6twotupen:1; /* hash 2-tuple IPv6 ingress packets */
156 int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */ 156 unsigned int ip4fourtupen:1; /* hash 4-tuple IPv4 ingress packets */
157 int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */ 157 unsigned int ip4twotupen:1; /* hash 2-tuple IPv4 ingress packets */
158 int udpen; /* hash 4-tuple UDP ingress packets */ 158 int udpen; /* hash 4-tuple UDP ingress packets */
159 } basicvirtual; 159 } basicvirtual;
160}; 160};
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index f3650fd096f4..1c51a7576119 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -676,7 +676,7 @@ static int de620_rx_intr(struct net_device *dev)
676 de620_set_register(dev, W_NPRF, next_rx_page); 676 de620_set_register(dev, W_NPRF, next_rx_page);
677 pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page); 677 pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
678 678
679 return (next_rx_page != curr_page); /* That was slightly tricky... */ 679 return next_rx_page != curr_page; /* That was slightly tricky... */
680} 680}
681 681
682/********************************************* 682/*********************************************
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index e5667c55844e..417e14385623 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1024,7 +1024,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1024 &data) != DFX_K_SUCCESS) { 1024 &data) != DFX_K_SUCCESS) {
1025 printk("%s: Could not read adapter factory MAC address!\n", 1025 printk("%s: Could not read adapter factory MAC address!\n",
1026 print_name); 1026 print_name);
1027 return(DFX_K_FAILURE); 1027 return DFX_K_FAILURE;
1028 } 1028 }
1029 le32 = cpu_to_le32(data); 1029 le32 = cpu_to_le32(data);
1030 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32)); 1030 memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));
@@ -1033,7 +1033,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1033 &data) != DFX_K_SUCCESS) { 1033 &data) != DFX_K_SUCCESS) {
1034 printk("%s: Could not read adapter factory MAC address!\n", 1034 printk("%s: Could not read adapter factory MAC address!\n",
1035 print_name); 1035 print_name);
1036 return(DFX_K_FAILURE); 1036 return DFX_K_FAILURE;
1037 } 1037 }
1038 le32 = cpu_to_le32(data); 1038 le32 = cpu_to_le32(data);
1039 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16)); 1039 memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));
@@ -1075,7 +1075,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1075 if (top_v == NULL) { 1075 if (top_v == NULL) {
1076 printk("%s: Could not allocate memory for host buffers " 1076 printk("%s: Could not allocate memory for host buffers "
1077 "and structures!\n", print_name); 1077 "and structures!\n", print_name);
1078 return(DFX_K_FAILURE); 1078 return DFX_K_FAILURE;
1079 } 1079 }
1080 memset(top_v, 0, alloc_size); /* zero out memory before continuing */ 1080 memset(top_v, 0, alloc_size); /* zero out memory before continuing */
1081 top_p = bp->kmalloced_dma; /* get physical address of buffer */ 1081 top_p = bp->kmalloced_dma; /* get physical address of buffer */
@@ -1145,7 +1145,7 @@ static int __devinit dfx_driver_init(struct net_device *dev,
1145 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n", 1145 DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
1146 print_name, (long)bp->cons_block_virt, bp->cons_block_phys); 1146 print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
1147 1147
1148 return(DFX_K_SUCCESS); 1148 return DFX_K_SUCCESS;
1149} 1149}
1150 1150
1151 1151
@@ -1195,7 +1195,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1195 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS) 1195 if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
1196 { 1196 {
1197 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name); 1197 printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
1198 return(DFX_K_FAILURE); 1198 return DFX_K_FAILURE;
1199 } 1199 }
1200 1200
1201 /* 1201 /*
@@ -1229,7 +1229,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1229 NULL) != DFX_K_SUCCESS) 1229 NULL) != DFX_K_SUCCESS)
1230 { 1230 {
1231 printk("%s: Could not set adapter burst size!\n", bp->dev->name); 1231 printk("%s: Could not set adapter burst size!\n", bp->dev->name);
1232 return(DFX_K_FAILURE); 1232 return DFX_K_FAILURE;
1233 } 1233 }
1234 1234
1235 /* 1235 /*
@@ -1246,7 +1246,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1246 NULL) != DFX_K_SUCCESS) 1246 NULL) != DFX_K_SUCCESS)
1247 { 1247 {
1248 printk("%s: Could not set consumer block address!\n", bp->dev->name); 1248 printk("%s: Could not set consumer block address!\n", bp->dev->name);
1249 return(DFX_K_FAILURE); 1249 return DFX_K_FAILURE;
1250 } 1250 }
1251 1251
1252 /* 1252 /*
@@ -1278,7 +1278,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1278 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 1278 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1279 { 1279 {
1280 printk("%s: DMA command request failed!\n", bp->dev->name); 1280 printk("%s: DMA command request failed!\n", bp->dev->name);
1281 return(DFX_K_FAILURE); 1281 return DFX_K_FAILURE;
1282 } 1282 }
1283 1283
1284 /* Set the initial values for eFDXEnable and MACTReq MIB objects */ 1284 /* Set the initial values for eFDXEnable and MACTReq MIB objects */
@@ -1294,7 +1294,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1294 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 1294 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
1295 { 1295 {
1296 printk("%s: DMA command request failed!\n", bp->dev->name); 1296 printk("%s: DMA command request failed!\n", bp->dev->name);
1297 return(DFX_K_FAILURE); 1297 return DFX_K_FAILURE;
1298 } 1298 }
1299 1299
1300 /* Initialize adapter CAM */ 1300 /* Initialize adapter CAM */
@@ -1302,7 +1302,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1302 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 1302 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
1303 { 1303 {
1304 printk("%s: Adapter CAM update failed!\n", bp->dev->name); 1304 printk("%s: Adapter CAM update failed!\n", bp->dev->name);
1305 return(DFX_K_FAILURE); 1305 return DFX_K_FAILURE;
1306 } 1306 }
1307 1307
1308 /* Initialize adapter filters */ 1308 /* Initialize adapter filters */
@@ -1310,7 +1310,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1310 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 1310 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
1311 { 1311 {
1312 printk("%s: Adapter filters update failed!\n", bp->dev->name); 1312 printk("%s: Adapter filters update failed!\n", bp->dev->name);
1313 return(DFX_K_FAILURE); 1313 return DFX_K_FAILURE;
1314 } 1314 }
1315 1315
1316 /* 1316 /*
@@ -1328,7 +1328,7 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1328 printk("%s: Receive buffer allocation failed\n", bp->dev->name); 1328 printk("%s: Receive buffer allocation failed\n", bp->dev->name);
1329 if (get_buffers) 1329 if (get_buffers)
1330 dfx_rcv_flush(bp); 1330 dfx_rcv_flush(bp);
1331 return(DFX_K_FAILURE); 1331 return DFX_K_FAILURE;
1332 } 1332 }
1333 1333
1334 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */ 1334 /* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */
@@ -1339,13 +1339,13 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
1339 printk("%s: Start command failed\n", bp->dev->name); 1339 printk("%s: Start command failed\n", bp->dev->name);
1340 if (get_buffers) 1340 if (get_buffers)
1341 dfx_rcv_flush(bp); 1341 dfx_rcv_flush(bp);
1342 return(DFX_K_FAILURE); 1342 return DFX_K_FAILURE;
1343 } 1343 }
1344 1344
1345 /* Initialization succeeded, reenable PDQ interrupts */ 1345 /* Initialization succeeded, reenable PDQ interrupts */
1346 1346
1347 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS); 1347 dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
1348 return(DFX_K_SUCCESS); 1348 return DFX_K_SUCCESS;
1349 } 1349 }
1350 1350
1351 1351
@@ -1434,7 +1434,7 @@ static int dfx_open(struct net_device *dev)
1434 1434
1435 /* Set device structure info */ 1435 /* Set device structure info */
1436 netif_start_queue(dev); 1436 netif_start_queue(dev);
1437 return(0); 1437 return 0;
1438} 1438}
1439 1439
1440 1440
@@ -1526,7 +1526,7 @@ static int dfx_close(struct net_device *dev)
1526 1526
1527 free_irq(dev->irq, dev); 1527 free_irq(dev->irq, dev);
1528 1528
1529 return(0); 1529 return 0;
1530} 1530}
1531 1531
1532 1532
@@ -2027,7 +2027,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2027 2027
2028 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET; 2028 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET;
2029 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2029 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2030 return((struct net_device_stats *) &bp->stats); 2030 return (struct net_device_stats *)&bp->stats;
2031 2031
2032 /* Fill the bp->stats structure with the SMT MIB object values */ 2032 /* Fill the bp->stats structure with the SMT MIB object values */
2033 2033
@@ -2128,7 +2128,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2128 2128
2129 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET; 2129 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET;
2130 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2130 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2131 return((struct net_device_stats *) &bp->stats); 2131 return (struct net_device_stats *)&bp->stats;
2132 2132
2133 /* Fill the bp->stats structure with the FDDI counter values */ 2133 /* Fill the bp->stats structure with the FDDI counter values */
2134 2134
@@ -2144,7 +2144,7 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev)
2144 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; 2144 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls;
2145 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; 2145 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
2146 2146
2147 return((struct net_device_stats *) &bp->stats); 2147 return (struct net_device_stats *)&bp->stats;
2148 } 2148 }
2149 2149
2150 2150
@@ -2354,7 +2354,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
2354 { 2354 {
2355 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name); 2355 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
2356 } 2356 }
2357 return(0); /* always return zero */ 2357 return 0; /* always return zero */
2358 } 2358 }
2359 2359
2360 2360
@@ -2438,8 +2438,8 @@ static int dfx_ctl_update_cam(DFX_board_t *bp)
2438 /* Issue command to update adapter CAM, then return */ 2438 /* Issue command to update adapter CAM, then return */
2439 2439
2440 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2440 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2441 return(DFX_K_FAILURE); 2441 return DFX_K_FAILURE;
2442 return(DFX_K_SUCCESS); 2442 return DFX_K_SUCCESS;
2443 } 2443 }
2444 2444
2445 2445
@@ -2504,8 +2504,8 @@ static int dfx_ctl_update_filters(DFX_board_t *bp)
2504 /* Issue command to update adapter filters, then return */ 2504 /* Issue command to update adapter filters, then return */
2505 2505
2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
2507 return(DFX_K_FAILURE); 2507 return DFX_K_FAILURE;
2508 return(DFX_K_SUCCESS); 2508 return DFX_K_SUCCESS;
2509 } 2509 }
2510 2510
2511 2511
@@ -2561,7 +2561,7 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2561 (status == PI_STATE_K_HALTED) || 2561 (status == PI_STATE_K_HALTED) ||
2562 (status == PI_STATE_K_DMA_UNAVAIL) || 2562 (status == PI_STATE_K_DMA_UNAVAIL) ||
2563 (status == PI_STATE_K_UPGRADE)) 2563 (status == PI_STATE_K_UPGRADE))
2564 return(DFX_K_OUTSTATE); 2564 return DFX_K_OUTSTATE;
2565 2565
2566 /* Put response buffer on the command response queue */ 2566 /* Put response buffer on the command response queue */
2567 2567
@@ -2599,7 +2599,7 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2599 udelay(100); /* wait for 100 microseconds */ 2599 udelay(100); /* wait for 100 microseconds */
2600 } 2600 }
2601 if (timeout_cnt == 0) 2601 if (timeout_cnt == 0)
2602 return(DFX_K_HW_TIMEOUT); 2602 return DFX_K_HW_TIMEOUT;
2603 2603
2604 /* Bump (and wrap) the completion index and write out to register */ 2604 /* Bump (and wrap) the completion index and write out to register */
2605 2605
@@ -2619,14 +2619,14 @@ static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
2619 udelay(100); /* wait for 100 microseconds */ 2619 udelay(100); /* wait for 100 microseconds */
2620 } 2620 }
2621 if (timeout_cnt == 0) 2621 if (timeout_cnt == 0)
2622 return(DFX_K_HW_TIMEOUT); 2622 return DFX_K_HW_TIMEOUT;
2623 2623
2624 /* Bump (and wrap) the completion index and write out to register */ 2624 /* Bump (and wrap) the completion index and write out to register */
2625 2625
2626 bp->cmd_rsp_reg.index.comp += 1; 2626 bp->cmd_rsp_reg.index.comp += 1;
2627 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1; 2627 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
2628 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); 2628 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
2629 return(DFX_K_SUCCESS); 2629 return DFX_K_SUCCESS;
2630 } 2630 }
2631 2631
2632 2632
@@ -2700,7 +2700,7 @@ static int dfx_hw_port_ctrl_req(
2700 udelay(100); /* wait for 100 microseconds */ 2700 udelay(100); /* wait for 100 microseconds */
2701 } 2701 }
2702 if (timeout_cnt == 0) 2702 if (timeout_cnt == 0)
2703 return(DFX_K_HW_TIMEOUT); 2703 return DFX_K_HW_TIMEOUT;
2704 2704
2705 /* 2705 /*
2706 * If the address of host_data is non-zero, assume caller has supplied a 2706 * If the address of host_data is non-zero, assume caller has supplied a
@@ -2710,7 +2710,7 @@ static int dfx_hw_port_ctrl_req(
2710 2710
2711 if (host_data != NULL) 2711 if (host_data != NULL)
2712 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data); 2712 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
2713 return(DFX_K_SUCCESS); 2713 return DFX_K_SUCCESS;
2714 } 2714 }
2715 2715
2716 2716
@@ -2800,7 +2800,7 @@ static int dfx_hw_adap_state_rd(DFX_board_t *bp)
2800 PI_UINT32 port_status; /* Port Status register value */ 2800 PI_UINT32 port_status; /* Port Status register value */
2801 2801
2802 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); 2802 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
2803 return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE); 2803 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE;
2804 } 2804 }
2805 2805
2806 2806
@@ -2852,8 +2852,8 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2852 udelay(100); /* wait for 100 microseconds */ 2852 udelay(100); /* wait for 100 microseconds */
2853 } 2853 }
2854 if (timeout_cnt == 0) 2854 if (timeout_cnt == 0)
2855 return(DFX_K_HW_TIMEOUT); 2855 return DFX_K_HW_TIMEOUT;
2856 return(DFX_K_SUCCESS); 2856 return DFX_K_SUCCESS;
2857 } 2857 }
2858 2858
2859/* 2859/*
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 7c075756611a..9d8a20b72fa9 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -27,7 +27,7 @@
27#undef DEBUG 27#undef DEBUG
28 28
29/* function for reading internal MAC register */ 29/* function for reading internal MAC register */
30u16 dnet_readw_mac(struct dnet *bp, u16 reg) 30static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
31{ 31{
32 u16 data_read; 32 u16 data_read;
33 33
@@ -46,7 +46,7 @@ u16 dnet_readw_mac(struct dnet *bp, u16 reg)
46} 46}
47 47
48/* function for writing internal MAC register */ 48/* function for writing internal MAC register */
49void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) 49static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
50{ 50{
51 /* load data to write */ 51 /* load data to write */
52 dnet_writel(bp, val, MACREG_DATA); 52 dnet_writel(bp, val, MACREG_DATA);
@@ -63,11 +63,11 @@ static void __dnet_set_hwaddr(struct dnet *bp)
63{ 63{
64 u16 tmp; 64 u16 tmp;
65 65
66 tmp = cpu_to_be16(*((u16 *) bp->dev->dev_addr)); 66 tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
67 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); 67 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
68 tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 2))); 68 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
69 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); 69 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
70 tmp = cpu_to_be16(*((u16 *) (bp->dev->dev_addr + 4))); 70 tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
71 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); 71 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
72} 72}
73 73
@@ -89,11 +89,11 @@ static void __devinit dnet_get_hwaddr(struct dnet *bp)
89 * Mac_addr[15:0]). 89 * Mac_addr[15:0]).
90 */ 90 */
91 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); 91 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
92 *((u16 *) addr) = be16_to_cpu(tmp); 92 *((__be16 *)addr) = cpu_to_be16(tmp);
93 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); 93 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
94 *((u16 *) (addr + 2)) = be16_to_cpu(tmp); 94 *((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
95 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); 95 tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
96 *((u16 *) (addr + 4)) = be16_to_cpu(tmp); 96 *((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
97 97
98 if (is_valid_ether_addr(addr)) 98 if (is_valid_ether_addr(addr))
99 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 99 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
@@ -361,7 +361,7 @@ err_out:
361} 361}
362 362
363/* For Neptune board: LINK1000 as Link LED and TX as activity LED */ 363/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
364int dnet_phy_marvell_fixup(struct phy_device *phydev) 364static int dnet_phy_marvell_fixup(struct phy_device *phydev)
365{ 365{
366 return phy_write(phydev, 0x18, 0x4148); 366 return phy_write(phydev, 0x18, 0x4148);
367} 367}
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 37dcfdc63456..ff2d29b17858 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -36,6 +36,7 @@
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/rtnetlink.h> 37#include <linux/rtnetlink.h>
38#include <net/rtnetlink.h> 38#include <net/rtnetlink.h>
39#include <linux/u64_stats_sync.h>
39 40
40static int numdummies = 1; 41static int numdummies = 1;
41 42
@@ -55,21 +56,69 @@ static void set_multicast_list(struct net_device *dev)
55{ 56{
56} 57}
57 58
59struct pcpu_dstats {
60 u64 tx_packets;
61 u64 tx_bytes;
62 struct u64_stats_sync syncp;
63};
64
65static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
66 struct rtnl_link_stats64 *stats)
67{
68 int i;
69
70 for_each_possible_cpu(i) {
71 const struct pcpu_dstats *dstats;
72 u64 tbytes, tpackets;
73 unsigned int start;
74
75 dstats = per_cpu_ptr(dev->dstats, i);
76 do {
77 start = u64_stats_fetch_begin(&dstats->syncp);
78 tbytes = dstats->tx_bytes;
79 tpackets = dstats->tx_packets;
80 } while (u64_stats_fetch_retry(&dstats->syncp, start));
81 stats->tx_bytes += tbytes;
82 stats->tx_packets += tpackets;
83 }
84 return stats;
85}
58 86
59static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev) 87static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
60{ 88{
61 dev->stats.tx_packets++; 89 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
62 dev->stats.tx_bytes += skb->len; 90
91 u64_stats_update_begin(&dstats->syncp);
92 dstats->tx_packets++;
93 dstats->tx_bytes += skb->len;
94 u64_stats_update_end(&dstats->syncp);
63 95
64 dev_kfree_skb(skb); 96 dev_kfree_skb(skb);
65 return NETDEV_TX_OK; 97 return NETDEV_TX_OK;
66} 98}
67 99
100static int dummy_dev_init(struct net_device *dev)
101{
102 dev->dstats = alloc_percpu(struct pcpu_dstats);
103 if (!dev->dstats)
104 return -ENOMEM;
105
106 return 0;
107}
108
109static void dummy_dev_free(struct net_device *dev)
110{
111 free_percpu(dev->dstats);
112 free_netdev(dev);
113}
114
68static const struct net_device_ops dummy_netdev_ops = { 115static const struct net_device_ops dummy_netdev_ops = {
116 .ndo_init = dummy_dev_init,
69 .ndo_start_xmit = dummy_xmit, 117 .ndo_start_xmit = dummy_xmit,
70 .ndo_validate_addr = eth_validate_addr, 118 .ndo_validate_addr = eth_validate_addr,
71 .ndo_set_multicast_list = set_multicast_list, 119 .ndo_set_multicast_list = set_multicast_list,
72 .ndo_set_mac_address = dummy_set_address, 120 .ndo_set_mac_address = dummy_set_address,
121 .ndo_get_stats64 = dummy_get_stats64,
73}; 122};
74 123
75static void dummy_setup(struct net_device *dev) 124static void dummy_setup(struct net_device *dev)
@@ -78,14 +127,17 @@ static void dummy_setup(struct net_device *dev)
78 127
79 /* Initialize the device structure. */ 128 /* Initialize the device structure. */
80 dev->netdev_ops = &dummy_netdev_ops; 129 dev->netdev_ops = &dummy_netdev_ops;
81 dev->destructor = free_netdev; 130 dev->destructor = dummy_dev_free;
82 131
83 /* Fill in device structure with ethernet-generic values. */ 132 /* Fill in device structure with ethernet-generic values. */
84 dev->tx_queue_len = 0; 133 dev->tx_queue_len = 0;
85 dev->flags |= IFF_NOARP; 134 dev->flags |= IFF_NOARP;
86 dev->flags &= ~IFF_MULTICAST; 135 dev->flags &= ~IFF_MULTICAST;
136 dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
137 dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
87 random_ether_addr(dev->dev_addr); 138 random_ether_addr(dev->dev_addr);
88} 139}
140
89static int dummy_validate(struct nlattr *tb[], struct nlattr *data[]) 141static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
90{ 142{
91 if (tb[IFLA_ADDRESS]) { 143 if (tb[IFLA_ADDRESS]) {
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 8e2eab4e7c75..b0aa9e68990a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2215,10 +2215,10 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2215static int e100_asf(struct nic *nic) 2215static int e100_asf(struct nic *nic)
2216{ 2216{
2217 /* ASF can be enabled from eeprom */ 2217 /* ASF can be enabled from eeprom */
2218 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && 2218 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2219 (nic->eeprom[eeprom_config_asf] & eeprom_asf) && 2219 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2220 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && 2220 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2221 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE)); 2221 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
2222} 2222}
2223 2223
2224static int e100_up(struct nic *nic) 2224static int e100_up(struct nic *nic)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 99288b95aead..a881dd0093bd 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -310,6 +310,9 @@ struct e1000_adapter {
310 int need_ioport; 310 int need_ioport;
311 311
312 bool discarding; 312 bool discarding;
313
314 struct work_struct fifo_stall_task;
315 struct work_struct phy_info_task;
313}; 316};
314 317
315enum e1000_state_t { 318enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8d9269d12a67..a117f2a0252e 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -123,8 +123,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
123 struct e1000_rx_ring *rx_ring); 123 struct e1000_rx_ring *rx_ring);
124static void e1000_set_rx_mode(struct net_device *netdev); 124static void e1000_set_rx_mode(struct net_device *netdev);
125static void e1000_update_phy_info(unsigned long data); 125static void e1000_update_phy_info(unsigned long data);
126static void e1000_update_phy_info_task(struct work_struct *work);
126static void e1000_watchdog(unsigned long data); 127static void e1000_watchdog(unsigned long data);
127static void e1000_82547_tx_fifo_stall(unsigned long data); 128static void e1000_82547_tx_fifo_stall(unsigned long data);
129static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
128static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, 130static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
129 struct net_device *netdev); 131 struct net_device *netdev);
130static struct net_device_stats * e1000_get_stats(struct net_device *netdev); 132static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -519,8 +521,21 @@ void e1000_down(struct e1000_adapter *adapter)
519 e1000_clean_all_rx_rings(adapter); 521 e1000_clean_all_rx_rings(adapter);
520} 522}
521 523
524void e1000_reinit_safe(struct e1000_adapter *adapter)
525{
526 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
527 msleep(1);
528 rtnl_lock();
529 e1000_down(adapter);
530 e1000_up(adapter);
531 rtnl_unlock();
532 clear_bit(__E1000_RESETTING, &adapter->flags);
533}
534
522void e1000_reinit_locked(struct e1000_adapter *adapter) 535void e1000_reinit_locked(struct e1000_adapter *adapter)
523{ 536{
537 /* if rtnl_lock is not held the call path is bogus */
538 ASSERT_RTNL();
524 WARN_ON(in_interrupt()); 539 WARN_ON(in_interrupt());
525 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) 540 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
526 msleep(1); 541 msleep(1);
@@ -988,8 +1003,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
988 (hw->mac_type != e1000_82547)) 1003 (hw->mac_type != e1000_82547))
989 netdev->features |= NETIF_F_TSO; 1004 netdev->features |= NETIF_F_TSO;
990 1005
991 if (pci_using_dac) 1006 if (pci_using_dac) {
992 netdev->features |= NETIF_F_HIGHDMA; 1007 netdev->features |= NETIF_F_HIGHDMA;
1008 netdev->vlan_features |= NETIF_F_HIGHDMA;
1009 }
993 1010
994 netdev->vlan_features |= NETIF_F_TSO; 1011 netdev->vlan_features |= NETIF_F_TSO;
995 netdev->vlan_features |= NETIF_F_HW_CSUM; 1012 netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -1045,7 +1062,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1045 adapter->phy_info_timer.function = e1000_update_phy_info; 1062 adapter->phy_info_timer.function = e1000_update_phy_info;
1046 adapter->phy_info_timer.data = (unsigned long)adapter; 1063 adapter->phy_info_timer.data = (unsigned long)adapter;
1047 1064
1065 INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
1048 INIT_WORK(&adapter->reset_task, e1000_reset_task); 1066 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1067 INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1049 1068
1050 e1000_check_options(adapter); 1069 e1000_check_options(adapter);
1051 1070
@@ -2232,22 +2251,45 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2232static void e1000_update_phy_info(unsigned long data) 2251static void e1000_update_phy_info(unsigned long data)
2233{ 2252{
2234 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2253 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2254 schedule_work(&adapter->phy_info_task);
2255}
2256
2257static void e1000_update_phy_info_task(struct work_struct *work)
2258{
2259 struct e1000_adapter *adapter = container_of(work,
2260 struct e1000_adapter,
2261 phy_info_task);
2235 struct e1000_hw *hw = &adapter->hw; 2262 struct e1000_hw *hw = &adapter->hw;
2263
2264 rtnl_lock();
2236 e1000_phy_get_info(hw, &adapter->phy_info); 2265 e1000_phy_get_info(hw, &adapter->phy_info);
2266 rtnl_unlock();
2237} 2267}
2238 2268
2239/** 2269/**
2240 * e1000_82547_tx_fifo_stall - Timer Call-back 2270 * e1000_82547_tx_fifo_stall - Timer Call-back
2241 * @data: pointer to adapter cast into an unsigned long 2271 * @data: pointer to adapter cast into an unsigned long
2242 **/ 2272 **/
2243
2244static void e1000_82547_tx_fifo_stall(unsigned long data) 2273static void e1000_82547_tx_fifo_stall(unsigned long data)
2245{ 2274{
2246 struct e1000_adapter *adapter = (struct e1000_adapter *)data; 2275 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2276 schedule_work(&adapter->fifo_stall_task);
2277}
2278
2279/**
2280 * e1000_82547_tx_fifo_stall_task - task to complete work
2281 * @work: work struct contained inside adapter struct
2282 **/
2283static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2284{
2285 struct e1000_adapter *adapter = container_of(work,
2286 struct e1000_adapter,
2287 fifo_stall_task);
2247 struct e1000_hw *hw = &adapter->hw; 2288 struct e1000_hw *hw = &adapter->hw;
2248 struct net_device *netdev = adapter->netdev; 2289 struct net_device *netdev = adapter->netdev;
2249 u32 tctl; 2290 u32 tctl;
2250 2291
2292 rtnl_lock();
2251 if (atomic_read(&adapter->tx_fifo_stall)) { 2293 if (atomic_read(&adapter->tx_fifo_stall)) {
2252 if ((er32(TDT) == er32(TDH)) && 2294 if ((er32(TDT) == er32(TDH)) &&
2253 (er32(TDFT) == er32(TDFH)) && 2295 (er32(TDFT) == er32(TDFH)) &&
@@ -2268,6 +2310,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
2268 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); 2310 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2269 } 2311 }
2270 } 2312 }
2313 rtnl_unlock();
2271} 2314}
2272 2315
2273bool e1000_has_link(struct e1000_adapter *adapter) 2316bool e1000_has_link(struct e1000_adapter *adapter)
@@ -3076,7 +3119,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3076 } 3119 }
3077 } 3120 }
3078 3121
3079 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { 3122 if (unlikely(vlan_tx_tag_present(skb))) {
3080 tx_flags |= E1000_TX_FLAGS_VLAN; 3123 tx_flags |= E1000_TX_FLAGS_VLAN;
3081 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 3124 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3082 } 3125 }
@@ -3135,7 +3178,7 @@ static void e1000_reset_task(struct work_struct *work)
3135 struct e1000_adapter *adapter = 3178 struct e1000_adapter *adapter =
3136 container_of(work, struct e1000_adapter, reset_task); 3179 container_of(work, struct e1000_adapter, reset_task);
3137 3180
3138 e1000_reinit_locked(adapter); 3181 e1000_reinit_safe(adapter);
3139} 3182}
3140 3183
3141/** 3184/**
@@ -3557,7 +3600,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3557 adapter->total_tx_packets += total_tx_packets; 3600 adapter->total_tx_packets += total_tx_packets;
3558 netdev->stats.tx_bytes += total_tx_bytes; 3601 netdev->stats.tx_bytes += total_tx_bytes;
3559 netdev->stats.tx_packets += total_tx_packets; 3602 netdev->stats.tx_packets += total_tx_packets;
3560 return (count < tx_ring->count); 3603 return count < tx_ring->count;
3561} 3604}
3562 3605
3563/** 3606/**
@@ -3621,13 +3664,14 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3621static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, 3664static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3622 __le16 vlan, struct sk_buff *skb) 3665 __le16 vlan, struct sk_buff *skb)
3623{ 3666{
3624 if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) { 3667 skb->protocol = eth_type_trans(skb, adapter->netdev);
3625 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3668
3626 le16_to_cpu(vlan) & 3669 if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
3627 E1000_RXD_SPC_VLAN_MASK); 3670 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
3628 } else { 3671 le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
3629 netif_receive_skb(skb); 3672 skb);
3630 } 3673 else
3674 napi_gro_receive(&adapter->napi, skb);
3631} 3675}
3632 3676
3633/** 3677/**
@@ -3785,8 +3829,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3785 goto next_desc; 3829 goto next_desc;
3786 } 3830 }
3787 3831
3788 skb->protocol = eth_type_trans(skb, netdev);
3789
3790 e1000_receive_skb(adapter, status, rx_desc->special, skb); 3832 e1000_receive_skb(adapter, status, rx_desc->special, skb);
3791 3833
3792next_desc: 3834next_desc:
@@ -3949,8 +3991,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3949 ((u32)(rx_desc->errors) << 24), 3991 ((u32)(rx_desc->errors) << 24),
3950 le16_to_cpu(rx_desc->csum), skb); 3992 le16_to_cpu(rx_desc->csum), skb);
3951 3993
3952 skb->protocol = eth_type_trans(skb, netdev);
3953
3954 e1000_receive_skb(adapter, status, rx_desc->special, skb); 3994 e1000_receive_skb(adapter, status, rx_desc->special, skb);
3955 3995
3956next_desc: 3996next_desc:
@@ -4501,7 +4541,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
4501 4541
4502 if (adapter->vlgrp) { 4542 if (adapter->vlgrp) {
4503 u16 vid; 4543 u16 vid;
4504 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 4544 for (vid = 0; vid < VLAN_N_VID; vid++) {
4505 if (!vlan_group_get_device(adapter->vlgrp, vid)) 4545 if (!vlan_group_get_device(adapter->vlgrp, vid))
4506 continue; 4546 continue;
4507 e1000_vlan_rx_add_vid(adapter->netdev, vid); 4547 e1000_vlan_rx_add_vid(adapter->netdev, vid);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index d3d4a57e2450..ca663f19d7df 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1801,7 +1801,8 @@ struct e1000_info e1000_82571_info = {
1801 | FLAG_RESET_OVERWRITES_LAA /* errata */ 1801 | FLAG_RESET_OVERWRITES_LAA /* errata */
1802 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1802 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1803 | FLAG_APME_CHECK_PORT_B, 1803 | FLAG_APME_CHECK_PORT_B,
1804 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */ 1804 .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
1805 | FLAG2_DMA_BURST,
1805 .pba = 38, 1806 .pba = 38,
1806 .max_hw_frame_size = DEFAULT_JUMBO, 1807 .max_hw_frame_size = DEFAULT_JUMBO,
1807 .get_variants = e1000_get_variants_82571, 1808 .get_variants = e1000_get_variants_82571,
@@ -1819,7 +1820,8 @@ struct e1000_info e1000_82572_info = {
1819 | FLAG_RX_CSUM_ENABLED 1820 | FLAG_RX_CSUM_ENABLED
1820 | FLAG_HAS_CTRLEXT_ON_LOAD 1821 | FLAG_HAS_CTRLEXT_ON_LOAD
1821 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1822 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1822 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */ 1823 .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
1824 | FLAG2_DMA_BURST,
1823 .pba = 38, 1825 .pba = 38,
1824 .max_hw_frame_size = DEFAULT_JUMBO, 1826 .max_hw_frame_size = DEFAULT_JUMBO,
1825 .get_variants = e1000_get_variants_82571, 1827 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 93b3bedae8d2..d3f7a9c3f973 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -446,7 +446,9 @@
446 446
447/* Transmit Descriptor Control */ 447/* Transmit Descriptor Control */
448#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ 448#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
449#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
449#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ 450#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
451#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */
450#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 452#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
451#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ 453#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
452/* Enable the counting of desc. still to be processed. */ 454/* Enable the counting of desc. still to be processed. */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index f9a31c82f871..cee882dd67bf 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -153,6 +153,33 @@ struct e1000_info;
153/* Time to wait before putting the device into D3 if there's no link (in ms). */ 153/* Time to wait before putting the device into D3 if there's no link (in ms). */
154#define LINK_TIMEOUT 100 154#define LINK_TIMEOUT 100
155 155
156#define DEFAULT_RDTR 0
157#define DEFAULT_RADV 8
158#define BURST_RDTR 0x20
159#define BURST_RADV 0x20
160
161/*
162 * in the case of WTHRESH, it appears at least the 82571/2 hardware
163 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
164 * WTHRESH=4, and since we want 64 bytes at a time written back, set
165 * it to 5
166 */
167#define E1000_TXDCTL_DMA_BURST_ENABLE \
168 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
169 E1000_TXDCTL_COUNT_DESC | \
170 (5 << 16) | /* wthresh must be +1 more than desired */\
171 (1 << 8) | /* hthresh */ \
172 0x1f) /* pthresh */
173
174#define E1000_RXDCTL_DMA_BURST_ENABLE \
175 (0x01000000 | /* set descriptor granularity */ \
176 (4 << 16) | /* set writeback threshold */ \
177 (4 << 8) | /* set prefetch threshold */ \
178 0x20) /* set hthresh */
179
180#define E1000_TIDV_FPD (1 << 31)
181#define E1000_RDTR_FPD (1 << 31)
182
156enum e1000_boards { 183enum e1000_boards {
157 board_82571, 184 board_82571,
158 board_82572, 185 board_82572,
@@ -425,6 +452,8 @@ struct e1000_info {
425#define FLAG2_DISABLE_ASPM_L1 (1 << 3) 452#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
426#define FLAG2_HAS_PHY_STATS (1 << 4) 453#define FLAG2_HAS_PHY_STATS (1 << 4)
427#define FLAG2_HAS_EEE (1 << 5) 454#define FLAG2_HAS_EEE (1 << 5)
455#define FLAG2_DMA_BURST (1 << 6)
456#define FLAG2_DISABLE_AIM (1 << 8)
428 457
429#define E1000_RX_DESC_PS(R, i) \ 458#define E1000_RX_DESC_PS(R, i) \
430 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 459 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 45aebb4a6fe1..24f8ac9cf703 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1494,6 +1494,7 @@ struct e1000_info e1000_es2_info = {
1494 | FLAG_APME_CHECK_PORT_B 1494 | FLAG_APME_CHECK_PORT_B
1495 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 1495 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
1496 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, 1496 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
1497 .flags2 = FLAG2_DMA_BURST,
1497 .pba = 38, 1498 .pba = 38,
1498 .max_hw_frame_size = DEFAULT_JUMBO, 1499 .max_hw_frame_size = DEFAULT_JUMBO,
1499 .get_variants = e1000_get_variants_80003es2lan, 1500 .get_variants = e1000_get_variants_80003es2lan,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 6355a1b779d3..8984d165a39b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -368,7 +368,7 @@ out:
368static u32 e1000_get_rx_csum(struct net_device *netdev) 368static u32 e1000_get_rx_csum(struct net_device *netdev)
369{ 369{
370 struct e1000_adapter *adapter = netdev_priv(netdev); 370 struct e1000_adapter *adapter = netdev_priv(netdev);
371 return (adapter->flags & FLAG_RX_CSUM_ENABLED); 371 return adapter->flags & FLAG_RX_CSUM_ENABLED;
372} 372}
373 373
374static int e1000_set_rx_csum(struct net_device *netdev, u32 data) 374static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
@@ -389,7 +389,7 @@ static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
389 389
390static u32 e1000_get_tx_csum(struct net_device *netdev) 390static u32 e1000_get_tx_csum(struct net_device *netdev)
391{ 391{
392 return ((netdev->features & NETIF_F_HW_CSUM) != 0); 392 return (netdev->features & NETIF_F_HW_CSUM) != 0;
393} 393}
394 394
395static int e1000_set_tx_csum(struct net_device *netdev, u32 data) 395static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
@@ -1717,13 +1717,6 @@ static void e1000_diag_test(struct net_device *netdev,
1717 1717
1718 e_info("offline testing starting\n"); 1718 e_info("offline testing starting\n");
1719 1719
1720 /*
1721 * Link test performed before hardware reset so autoneg doesn't
1722 * interfere with test result
1723 */
1724 if (e1000_link_test(adapter, &data[4]))
1725 eth_test->flags |= ETH_TEST_FL_FAILED;
1726
1727 if (if_running) 1720 if (if_running)
1728 /* indicate we're in test mode */ 1721 /* indicate we're in test mode */
1729 dev_close(netdev); 1722 dev_close(netdev);
@@ -1747,15 +1740,19 @@ static void e1000_diag_test(struct net_device *netdev,
1747 if (e1000_loopback_test(adapter, &data[3])) 1740 if (e1000_loopback_test(adapter, &data[3]))
1748 eth_test->flags |= ETH_TEST_FL_FAILED; 1741 eth_test->flags |= ETH_TEST_FL_FAILED;
1749 1742
1743 /* force this routine to wait until autoneg complete/timeout */
1744 adapter->hw.phy.autoneg_wait_to_complete = 1;
1745 e1000e_reset(adapter);
1746 adapter->hw.phy.autoneg_wait_to_complete = 0;
1747
1748 if (e1000_link_test(adapter, &data[4]))
1749 eth_test->flags |= ETH_TEST_FL_FAILED;
1750
1750 /* restore speed, duplex, autoneg settings */ 1751 /* restore speed, duplex, autoneg settings */
1751 adapter->hw.phy.autoneg_advertised = autoneg_advertised; 1752 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1752 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; 1753 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1753 adapter->hw.mac.autoneg = autoneg; 1754 adapter->hw.mac.autoneg = autoneg;
1754
1755 /* force this routine to wait until autoneg complete/timeout */
1756 adapter->hw.phy.autoneg_wait_to_complete = 1;
1757 e1000e_reset(adapter); 1755 e1000e_reset(adapter);
1758 adapter->hw.phy.autoneg_wait_to_complete = 0;
1759 1756
1760 clear_bit(__E1000_TESTING, &adapter->state); 1757 clear_bit(__E1000_TESTING, &adapter->state);
1761 if (if_running) 1758 if (if_running)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f726fb..ba302a5c2c30 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@ enum e1e_registers {
57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */
58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
60 E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
60 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
61 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 62 E1000_FCT = 0x00030, /* Flow Control Type - RW */
62 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d12711c..e3374d9a2472 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
111
108#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
109 113
110#define E1000_ICH_RAR_ENTRIES 7 114#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
125 129
126/* SMBus Address Phy Register */ 130/* SMBus Address Phy Register */
127#define HV_SMB_ADDR PHY_REG(768, 26) 131#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F
128#define HV_SMB_ADDR_PEC_EN 0x0200 133#define HV_SMB_ADDR_PEC_EN 0x0200
129#define HV_SMB_ADDR_VALID 0x0080 134#define HV_SMB_ADDR_VALID 0x0080
130 135
@@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
237static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 242static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
238static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 243static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
239static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 244static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
245static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
246static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
240 247
241static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 248static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
242{ 249{
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
272static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 279static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
273{ 280{
274 struct e1000_phy_info *phy = &hw->phy; 281 struct e1000_phy_info *phy = &hw->phy;
275 u32 ctrl; 282 u32 ctrl, fwsm;
276 s32 ret_val = 0; 283 s32 ret_val = 0;
277 284
278 phy->addr = 1; 285 phy->addr = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
294 * disabled, then toggle the LANPHYPC Value bit to force 301 * disabled, then toggle the LANPHYPC Value bit to force
295 * the interconnect to PCIe mode. 302 * the interconnect to PCIe mode.
296 */ 303 */
297 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 304 fwsm = er32(FWSM);
305 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
298 ctrl = er32(CTRL); 306 ctrl = er32(CTRL);
299 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 307 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
300 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 308 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
303 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 311 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
304 ew32(CTRL, ctrl); 312 ew32(CTRL, ctrl);
305 msleep(50); 313 msleep(50);
314
315 /*
316 * Gate automatic PHY configuration by hardware on
317 * non-managed 82579
318 */
319 if (hw->mac.type == e1000_pch2lan)
320 e1000_gate_hw_phy_config_ich8lan(hw, true);
306 } 321 }
307 322
308 /* 323 /*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
315 if (ret_val) 330 if (ret_val)
316 goto out; 331 goto out;
317 332
333 /* Ungate automatic PHY configuration on non-managed 82579 */
334 if ((hw->mac.type == e1000_pch2lan) &&
335 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
336 msleep(10);
337 e1000_gate_hw_phy_config_ich8lan(hw, false);
338 }
339
318 phy->id = e1000_phy_unknown; 340 phy->id = e1000_phy_unknown;
319 ret_val = e1000e_get_phy_id(hw); 341 ret_val = e1000e_get_phy_id(hw);
320 if (ret_val) 342 if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
561 if (mac->type == e1000_ich8lan) 583 if (mac->type == e1000_ich8lan)
562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 584 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
563 585
564 /* Disable PHY configuration by hardware, config by software */ 586 /* Gate automatic PHY configuration by hardware on managed 82579 */
565 if (mac->type == e1000_pch2lan) { 587 if ((mac->type == e1000_pch2lan) &&
566 u32 extcnf_ctrl = er32(EXTCNF_CTRL); 588 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
567 589 e1000_gate_hw_phy_config_ich8lan(hw, true);
568 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
569 ew32(EXTCNF_CTRL, extcnf_ctrl);
570 }
571 590
572 return 0; 591 return 0;
573} 592}
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
652 goto out; 671 goto out;
653 } 672 }
654 673
674 if (hw->mac.type == e1000_pch2lan) {
675 ret_val = e1000_k1_workaround_lv(hw);
676 if (ret_val)
677 goto out;
678 }
679
655 /* 680 /*
656 * Check if there was DownShift, must be checked 681 * Check if there was DownShift, must be checked
657 * immediately after link-up 682 * immediately after link-up
@@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
895} 920}
896 921
897/** 922/**
923 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
924 * @hw: pointer to the HW structure
925 *
926 * Assumes semaphore already acquired.
927 *
928 **/
929static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
930{
931 u16 phy_data;
932 u32 strap = er32(STRAP);
933 s32 ret_val = 0;
934
935 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
936
937 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
938 if (ret_val)
939 goto out;
940
941 phy_data &= ~HV_SMB_ADDR_MASK;
942 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
943 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
944 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
945
946out:
947 return ret_val;
948}
949
950/**
898 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 951 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
899 * @hw: pointer to the HW structure 952 * @hw: pointer to the HW structure
900 * 953 *
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
903 **/ 956 **/
904static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 957static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
905{ 958{
906 struct e1000_adapter *adapter = hw->adapter;
907 struct e1000_phy_info *phy = &hw->phy; 959 struct e1000_phy_info *phy = &hw->phy;
908 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 960 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
909 s32 ret_val = 0; 961 s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
921 if (phy->type != e1000_phy_igp_3) 973 if (phy->type != e1000_phy_igp_3)
922 return ret_val; 974 return ret_val;
923 975
924 if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { 976 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
977 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
925 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 978 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
926 break; 979 break;
927 } 980 }
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1014 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1015 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
963 1016
964 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1017 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
965 ((hw->mac.type == e1000_pchlan) || 1018 (hw->mac.type == e1000_pchlan)) ||
966 (hw->mac.type == e1000_pch2lan))) { 1019 (hw->mac.type == e1000_pch2lan)) {
967 /* 1020 /*
968 * HW configures the SMBus address and LEDs when the 1021 * HW configures the SMBus address and LEDs when the
969 * OEM and LCD Write Enable bits are set in the NVM. 1022 * OEM and LCD Write Enable bits are set in the NVM.
970 * When both NVM bits are cleared, SW will configure 1023 * When both NVM bits are cleared, SW will configure
971 * them instead. 1024 * them instead.
972 */ 1025 */
973 data = er32(STRAP); 1026 ret_val = e1000_write_smbus_addr(hw);
974 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
975 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
976 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
977 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
978 reg_data);
979 if (ret_val) 1027 if (ret_val)
980 goto out; 1028 goto out;
981 1029
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1440 goto out; 1488 goto out;
1441 1489
1442 /* Enable jumbo frame workaround in the PHY */ 1490 /* Enable jumbo frame workaround in the PHY */
1443 e1e_rphy(hw, PHY_REG(769, 20), &data);
1444 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1445 if (ret_val)
1446 goto out;
1447 e1e_rphy(hw, PHY_REG(769, 23), &data); 1491 e1e_rphy(hw, PHY_REG(769, 23), &data);
1448 data &= ~(0x7F << 5); 1492 data &= ~(0x7F << 5);
1449 data |= (0x37 << 5); 1493 data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1452 goto out; 1496 goto out;
1453 e1e_rphy(hw, PHY_REG(769, 16), &data); 1497 e1e_rphy(hw, PHY_REG(769, 16), &data);
1454 data &= ~(1 << 13); 1498 data &= ~(1 << 13);
1455 data |= (1 << 12);
1456 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1499 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1457 if (ret_val) 1500 if (ret_val)
1458 goto out; 1501 goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1477 1520
1478 mac_reg = er32(RCTL); 1521 mac_reg = er32(RCTL);
1479 mac_reg &= ~E1000_RCTL_SECRC; 1522 mac_reg &= ~E1000_RCTL_SECRC;
1480 ew32(FFLT_DBG, mac_reg); 1523 ew32(RCTL, mac_reg);
1481 1524
1482 ret_val = e1000e_read_kmrn_reg(hw, 1525 ret_val = e1000e_read_kmrn_reg(hw,
1483 E1000_KMRNCTRLSTA_CTRL_OFFSET, 1526 E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1503 goto out; 1546 goto out;
1504 1547
1505 /* Write PHY register values back to h/w defaults */ 1548 /* Write PHY register values back to h/w defaults */
1506 e1e_rphy(hw, PHY_REG(769, 20), &data);
1507 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1508 if (ret_val)
1509 goto out;
1510 e1e_rphy(hw, PHY_REG(769, 23), &data); 1549 e1e_rphy(hw, PHY_REG(769, 23), &data);
1511 data &= ~(0x7F << 5); 1550 data &= ~(0x7F << 5);
1512 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1551 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1513 if (ret_val) 1552 if (ret_val)
1514 goto out; 1553 goto out;
1515 e1e_rphy(hw, PHY_REG(769, 16), &data); 1554 e1e_rphy(hw, PHY_REG(769, 16), &data);
1516 data &= ~(1 << 12);
1517 data |= (1 << 13); 1555 data |= (1 << 13);
1518 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1556 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1519 if (ret_val) 1557 if (ret_val)
@@ -1559,6 +1597,69 @@ out:
1559} 1597}
1560 1598
1561/** 1599/**
1600 * e1000_k1_gig_workaround_lv - K1 Si workaround
1601 * @hw: pointer to the HW structure
1602 *
1603 * Workaround to set the K1 beacon duration for 82579 parts
1604 **/
1605static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1606{
1607 s32 ret_val = 0;
1608 u16 status_reg = 0;
1609 u32 mac_reg;
1610
1611 if (hw->mac.type != e1000_pch2lan)
1612 goto out;
1613
1614 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1615 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1616 if (ret_val)
1617 goto out;
1618
1619 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1620 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1621 mac_reg = er32(FEXTNVM4);
1622 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1623
1624 if (status_reg & HV_M_STATUS_SPEED_1000)
1625 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1626 else
1627 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1628
1629 ew32(FEXTNVM4, mac_reg);
1630 }
1631
1632out:
1633 return ret_val;
1634}
1635
1636/**
1637 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1638 * @hw: pointer to the HW structure
1639 * @gate: boolean set to true to gate, false to ungate
1640 *
1641 * Gate/ungate the automatic PHY configuration via hardware; perform
1642 * the configuration via software instead.
1643 **/
1644static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1645{
1646 u32 extcnf_ctrl;
1647
1648 if (hw->mac.type != e1000_pch2lan)
1649 return;
1650
1651 extcnf_ctrl = er32(EXTCNF_CTRL);
1652
1653 if (gate)
1654 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1655 else
1656 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1657
1658 ew32(EXTCNF_CTRL, extcnf_ctrl);
1659 return;
1660}
1661
1662/**
1562 * e1000_lan_init_done_ich8lan - Check for PHY config completion 1663 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1563 * @hw: pointer to the HW structure 1664 * @hw: pointer to the HW structure
1564 * 1665 *
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1602 if (e1000_check_reset_block(hw)) 1703 if (e1000_check_reset_block(hw))
1603 goto out; 1704 goto out;
1604 1705
1706 /* Allow time for h/w to get to quiescent state after reset */
1707 msleep(10);
1708
1605 /* Perform any necessary post-reset workarounds */ 1709 /* Perform any necessary post-reset workarounds */
1606 switch (hw->mac.type) { 1710 switch (hw->mac.type) {
1607 case e1000_pchlan: 1711 case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1630 /* Configure the LCD with the OEM bits in NVM */ 1734 /* Configure the LCD with the OEM bits in NVM */
1631 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1632 1736
1737 /* Ungate automatic PHY configuration on non-managed 82579 */
1738 if ((hw->mac.type == e1000_pch2lan) &&
1739 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1740 msleep(10);
1741 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 }
1743
1633out: 1744out:
1634 return ret_val; 1745 return ret_val;
1635} 1746}
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1646{ 1757{
1647 s32 ret_val = 0; 1758 s32 ret_val = 0;
1648 1759
1760 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1761 if ((hw->mac.type == e1000_pch2lan) &&
1762 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1763 e1000_gate_hw_phy_config_ich8lan(hw, true);
1764
1649 ret_val = e1000e_phy_hw_reset_generic(hw); 1765 ret_val = e1000e_phy_hw_reset_generic(hw);
1650 if (ret_val) 1766 if (ret_val)
1651 goto out; 1767 goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2910 * external PHY is reset. 3026 * external PHY is reset.
2911 */ 3027 */
2912 ctrl |= E1000_CTRL_PHY_RST; 3028 ctrl |= E1000_CTRL_PHY_RST;
3029
3030 /*
3031 * Gate automatic PHY configuration by hardware on
3032 * non-managed 82579
3033 */
3034 if ((hw->mac.type == e1000_pch2lan) &&
3035 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3036 e1000_gate_hw_phy_config_ich8lan(hw, true);
2913 } 3037 }
2914 ret_val = e1000_acquire_swflag_ich8lan(hw); 3038 ret_val = e1000_acquire_swflag_ich8lan(hw);
2915 e_dbg("Issuing a global reset to ich8lan\n"); 3039 e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3460void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3584void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3461{ 3585{
3462 u32 phy_ctrl; 3586 u32 phy_ctrl;
3587 s32 ret_val;
3463 3588
3464 phy_ctrl = er32(PHY_CTRL); 3589 phy_ctrl = er32(PHY_CTRL);
3465 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3590 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3466 ew32(PHY_CTRL, phy_ctrl); 3591 ew32(PHY_CTRL, phy_ctrl);
3467 3592
3468 if (hw->mac.type >= e1000_pchlan) 3593 if (hw->mac.type >= e1000_pchlan) {
3469 e1000_phy_hw_reset_ich8lan(hw); 3594 e1000_oem_bits_config_ich8lan(hw, true);
3595 ret_val = hw->phy.ops.acquire(hw);
3596 if (ret_val)
3597 return;
3598 e1000_write_smbus_addr(hw);
3599 hw->phy.ops.release(hw);
3600 }
3470} 3601}
3471 3602
3472/** 3603/**
@@ -3855,7 +3986,7 @@ struct e1000_info e1000_pch2_info = {
3855 | FLAG_APME_IN_WUC, 3986 | FLAG_APME_IN_WUC,
3856 .flags2 = FLAG2_HAS_PHY_STATS 3987 .flags2 = FLAG2_HAS_PHY_STATS
3857 | FLAG2_HAS_EEE, 3988 | FLAG2_HAS_EEE,
3858 .pba = 18, 3989 .pba = 26,
3859 .max_hw_frame_size = DEFAULT_JUMBO, 3990 .max_hw_frame_size = DEFAULT_JUMBO,
3860 .get_variants = e1000_get_variants_ich8lan, 3991 .get_variants = e1000_get_variants_ich8lan,
3861 .mac_ops = &ich8_mac_ops, 3992 .mac_ops = &ich8_mac_ops,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index e2c7e0d767b1..ec8cf3f51423 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1053,7 +1053,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
1053 adapter->total_tx_packets += total_tx_packets; 1053 adapter->total_tx_packets += total_tx_packets;
1054 netdev->stats.tx_bytes += total_tx_bytes; 1054 netdev->stats.tx_bytes += total_tx_bytes;
1055 netdev->stats.tx_packets += total_tx_packets; 1055 netdev->stats.tx_packets += total_tx_packets;
1056 return (count < tx_ring->count); 1056 return count < tx_ring->count;
1057} 1057}
1058 1058
1059/** 1059/**
@@ -2290,6 +2290,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2290 goto set_itr_now; 2290 goto set_itr_now;
2291 } 2291 }
2292 2292
2293 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2294 new_itr = 0;
2295 goto set_itr_now;
2296 }
2297
2293 adapter->tx_itr = e1000_update_itr(adapter, 2298 adapter->tx_itr = e1000_update_itr(adapter,
2294 adapter->tx_itr, 2299 adapter->tx_itr,
2295 adapter->total_tx_packets, 2300 adapter->total_tx_packets,
@@ -2338,7 +2343,10 @@ set_itr_now:
2338 if (adapter->msix_entries) 2343 if (adapter->msix_entries)
2339 adapter->rx_ring->set_itr = 1; 2344 adapter->rx_ring->set_itr = 1;
2340 else 2345 else
2341 ew32(ITR, 1000000000 / (new_itr * 256)); 2346 if (new_itr)
2347 ew32(ITR, 1000000000 / (new_itr * 256));
2348 else
2349 ew32(ITR, 0);
2342 } 2350 }
2343} 2351}
2344 2352
@@ -2537,7 +2545,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
2537 if (!adapter->vlgrp) 2545 if (!adapter->vlgrp)
2538 return; 2546 return;
2539 2547
2540 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2548 for (vid = 0; vid < VLAN_N_VID; vid++) {
2541 if (!vlan_group_get_device(adapter->vlgrp, vid)) 2549 if (!vlan_group_get_device(adapter->vlgrp, vid))
2542 continue; 2550 continue;
2543 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2551 e1000_vlan_rx_add_vid(adapter->netdev, vid);
@@ -2650,6 +2658,26 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2650 /* Tx irq moderation */ 2658 /* Tx irq moderation */
2651 ew32(TADV, adapter->tx_abs_int_delay); 2659 ew32(TADV, adapter->tx_abs_int_delay);
2652 2660
2661 if (adapter->flags2 & FLAG2_DMA_BURST) {
2662 u32 txdctl = er32(TXDCTL(0));
2663 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2664 E1000_TXDCTL_WTHRESH);
2665 /*
2666 * set up some performance related parameters to encourage the
2667 * hardware to use the bus more efficiently in bursts, depends
2668 * on the tx_int_delay to be enabled,
2669 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
2670 * hthresh = 1 ==> prefetch when one or more available
2671 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2672 * BEWARE: this seems to work but should be considered first if
2673 * there are tx hangs or other tx related bugs
2674 */
2675 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2676 ew32(TXDCTL(0), txdctl);
2677 /* erratum work around: set txdctl the same for both queues */
2678 ew32(TXDCTL(1), txdctl);
2679 }
2680
2653 /* Program the Transmit Control Register */ 2681 /* Program the Transmit Control Register */
2654 tctl = er32(TCTL); 2682 tctl = er32(TCTL);
2655 tctl &= ~E1000_TCTL_CT; 2683 tctl &= ~E1000_TCTL_CT;
@@ -2705,6 +2733,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2705 u32 psrctl = 0; 2733 u32 psrctl = 0;
2706 u32 pages = 0; 2734 u32 pages = 0;
2707 2735
2736 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2737 if (hw->mac.type == e1000_pch2lan) {
2738 s32 ret_val;
2739
2740 if (adapter->netdev->mtu > ETH_DATA_LEN)
2741 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2742 else
2743 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2744 }
2745
2708 /* Program MC offset vector base */ 2746 /* Program MC offset vector base */
2709 rctl = er32(RCTL); 2747 rctl = er32(RCTL);
2710 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2748 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2745,16 +2783,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2745 e1e_wphy(hw, 22, phy_data); 2783 e1e_wphy(hw, 22, phy_data);
2746 } 2784 }
2747 2785
2748 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2749 if (hw->mac.type == e1000_pch2lan) {
2750 s32 ret_val;
2751
2752 if (rctl & E1000_RCTL_LPE)
2753 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2754 else
2755 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2756 }
2757
2758 /* Setup buffer sizes */ 2786 /* Setup buffer sizes */
2759 rctl &= ~E1000_RCTL_SZ_4096; 2787 rctl &= ~E1000_RCTL_SZ_4096;
2760 rctl |= E1000_RCTL_BSEX; 2788 rctl |= E1000_RCTL_BSEX;
@@ -2872,12 +2900,35 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2872 e1e_flush(); 2900 e1e_flush();
2873 msleep(10); 2901 msleep(10);
2874 2902
2903 if (adapter->flags2 & FLAG2_DMA_BURST) {
2904 /*
2905 * set the writeback threshold (only takes effect if the RDTR
2906 * is set). set GRAN=1 and write back up to 0x4 worth, and
2907 * enable prefetching of 0x20 rx descriptors
2908 * granularity = 01
2909 * wthresh = 04,
2910 * hthresh = 04,
2911 * pthresh = 0x20
2912 */
2913 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
2914 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
2915
2916 /*
2917 * override the delay timers for enabling bursting, only if
2918 * the value was not set by the user via module options
2919 */
2920 if (adapter->rx_int_delay == DEFAULT_RDTR)
2921 adapter->rx_int_delay = BURST_RDTR;
2922 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
2923 adapter->rx_abs_int_delay = BURST_RADV;
2924 }
2925
2875 /* set the Receive Delay Timer Register */ 2926 /* set the Receive Delay Timer Register */
2876 ew32(RDTR, adapter->rx_int_delay); 2927 ew32(RDTR, adapter->rx_int_delay);
2877 2928
2878 /* irq moderation */ 2929 /* irq moderation */
2879 ew32(RADV, adapter->rx_abs_int_delay); 2930 ew32(RADV, adapter->rx_abs_int_delay);
2880 if (adapter->itr_setting != 0) 2931 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
2881 ew32(ITR, 1000000000 / (adapter->itr * 256)); 2932 ew32(ITR, 1000000000 / (adapter->itr * 256));
2882 2933
2883 ctrl_ext = er32(CTRL_EXT); 2934 ctrl_ext = er32(CTRL_EXT);
@@ -2922,11 +2973,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2922 * packet size is equal or larger than the specified value (in 8 byte 2973 * packet size is equal or larger than the specified value (in 8 byte
2923 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 2974 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2924 */ 2975 */
2925 if (adapter->flags & FLAG_HAS_ERT) { 2976 if ((adapter->flags & FLAG_HAS_ERT) ||
2977 (adapter->hw.mac.type == e1000_pch2lan)) {
2926 if (adapter->netdev->mtu > ETH_DATA_LEN) { 2978 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2927 u32 rxdctl = er32(RXDCTL(0)); 2979 u32 rxdctl = er32(RXDCTL(0));
2928 ew32(RXDCTL(0), rxdctl | 0x3); 2980 ew32(RXDCTL(0), rxdctl | 0x3);
2929 ew32(ERT, E1000_ERT_2048 | (1 << 13)); 2981 if (adapter->flags & FLAG_HAS_ERT)
2982 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2930 /* 2983 /*
2931 * With jumbo frames and early-receive enabled, 2984 * With jumbo frames and early-receive enabled,
2932 * excessive C-state transition latencies result in 2985 * excessive C-state transition latencies result in
@@ -3189,9 +3242,35 @@ void e1000e_reset(struct e1000_adapter *adapter)
3189 fc->low_water = 0x05048; 3242 fc->low_water = 0x05048;
3190 fc->pause_time = 0x0650; 3243 fc->pause_time = 0x0650;
3191 fc->refresh_time = 0x0400; 3244 fc->refresh_time = 0x0400;
3245 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3246 pba = 14;
3247 ew32(PBA, pba);
3248 }
3192 break; 3249 break;
3193 } 3250 }
3194 3251
3252 /*
3253 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3254 * fit in receive buffer and early-receive not supported.
3255 */
3256 if (adapter->itr_setting & 0x3) {
3257 if (((adapter->max_frame_size * 2) > (pba << 10)) &&
3258 !(adapter->flags & FLAG_HAS_ERT)) {
3259 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3260 dev_info(&adapter->pdev->dev,
3261 "Interrupt Throttle Rate turned off\n");
3262 adapter->flags2 |= FLAG2_DISABLE_AIM;
3263 ew32(ITR, 0);
3264 }
3265 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3266 dev_info(&adapter->pdev->dev,
3267 "Interrupt Throttle Rate turned on\n");
3268 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3269 adapter->itr = 20000;
3270 ew32(ITR, 1000000000 / (adapter->itr * 256));
3271 }
3272 }
3273
3195 /* Allow time for pending master requests to run */ 3274 /* Allow time for pending master requests to run */
3196 mac->ops.reset_hw(hw); 3275 mac->ops.reset_hw(hw);
3197 3276
@@ -3510,7 +3589,8 @@ static int e1000_open(struct net_device *netdev)
3510 e1000_update_mng_vlan(adapter); 3589 e1000_update_mng_vlan(adapter);
3511 3590
3512 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3591 /* DMA latency requirement to workaround early-receive/jumbo issue */
3513 if (adapter->flags & FLAG_HAS_ERT) 3592 if ((adapter->flags & FLAG_HAS_ERT) ||
3593 (adapter->hw.mac.type == e1000_pch2lan))
3514 pm_qos_add_request(&adapter->netdev->pm_qos_req, 3594 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3515 PM_QOS_CPU_DMA_LATENCY, 3595 PM_QOS_CPU_DMA_LATENCY,
3516 PM_QOS_DEFAULT_VALUE); 3596 PM_QOS_DEFAULT_VALUE);
@@ -3619,7 +3699,8 @@ static int e1000_close(struct net_device *netdev)
3619 if (adapter->flags & FLAG_HAS_AMT) 3699 if (adapter->flags & FLAG_HAS_AMT)
3620 e1000_release_hw_control(adapter); 3700 e1000_release_hw_control(adapter);
3621 3701
3622 if (adapter->flags & FLAG_HAS_ERT) 3702 if ((adapter->flags & FLAG_HAS_ERT) ||
3703 (adapter->hw.mac.type == e1000_pch2lan))
3623 pm_qos_remove_request(&adapter->netdev->pm_qos_req); 3704 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3624 3705
3625 pm_runtime_put_sync(&pdev->dev); 3706 pm_runtime_put_sync(&pdev->dev);
@@ -4235,6 +4316,16 @@ link_up:
4235 /* Force detection of hung controller every watchdog period */ 4316 /* Force detection of hung controller every watchdog period */
4236 adapter->detect_tx_hung = 1; 4317 adapter->detect_tx_hung = 1;
4237 4318
4319 /* flush partial descriptors to memory before detecting tx hang */
4320 if (adapter->flags2 & FLAG2_DMA_BURST) {
4321 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4322 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4323 /*
4324 * no need to flush the writes because the timeout code does
4325 * an er32 first thing
4326 */
4327 }
4328
4238 /* 4329 /*
4239 * With 82571 controllers, LAA may be overwritten due to controller 4330 * With 82571 controllers, LAA may be overwritten due to controller
4240 * reset from the other port. Set the appropriate LAA in RAR[0] 4331 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4709,7 +4800,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4709 if (e1000_maybe_stop_tx(netdev, count + 2)) 4800 if (e1000_maybe_stop_tx(netdev, count + 2))
4710 return NETDEV_TX_BUSY; 4801 return NETDEV_TX_BUSY;
4711 4802
4712 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 4803 if (vlan_tx_tag_present(skb)) {
4713 tx_flags |= E1000_TX_FLAGS_VLAN; 4804 tx_flags |= E1000_TX_FLAGS_VLAN;
4714 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 4805 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
4715 } 4806 }
@@ -4813,6 +4904,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4813 return -EINVAL; 4904 return -EINVAL;
4814 } 4905 }
4815 4906
4907 /* Jumbo frame workaround on 82579 requires CRC be stripped */
4908 if ((adapter->hw.mac.type == e1000_pch2lan) &&
4909 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
4910 (new_mtu > ETH_DATA_LEN)) {
4911 e_err("Jumbo Frames not supported on 82579 when CRC "
4912 "stripping is disabled.\n");
4913 return -EINVAL;
4914 }
4915
4816 /* 82573 Errata 17 */ 4916 /* 82573 Errata 17 */
4817 if (((adapter->hw.mac.type == e1000_82573) || 4917 if (((adapter->hw.mac.type == e1000_82573) ||
4818 (adapter->hw.mac.type == e1000_82574)) && 4918 (adapter->hw.mac.type == e1000_82574)) &&
@@ -5683,8 +5783,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5683 netdev->vlan_features |= NETIF_F_HW_CSUM; 5783 netdev->vlan_features |= NETIF_F_HW_CSUM;
5684 netdev->vlan_features |= NETIF_F_SG; 5784 netdev->vlan_features |= NETIF_F_SG;
5685 5785
5686 if (pci_using_dac) 5786 if (pci_using_dac) {
5687 netdev->features |= NETIF_F_HIGHDMA; 5787 netdev->features |= NETIF_F_HIGHDMA;
5788 netdev->vlan_features |= NETIF_F_HIGHDMA;
5789 }
5688 5790
5689 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 5791 if (e1000e_enable_mng_pass_thru(&adapter->hw))
5690 adapter->flags |= FLAG_MNG_PT_ENABLED; 5792 adapter->flags |= FLAG_MNG_PT_ENABLED;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 34aeec13bb16..3d36911f77f3 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -91,7 +91,6 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
91 * Valid Range: 0-65535 91 * Valid Range: 0-65535
92 */ 92 */
93E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); 93E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
94#define DEFAULT_RDTR 0
95#define MAX_RXDELAY 0xFFFF 94#define MAX_RXDELAY 0xFFFF
96#define MIN_RXDELAY 0 95#define MIN_RXDELAY 0
97 96
@@ -101,7 +100,6 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
101 * Valid Range: 0-65535 100 * Valid Range: 0-65535
102 */ 101 */
103E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); 102E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
104#define DEFAULT_RADV 8
105#define MAX_RXABSDELAY 0xFFFF 103#define MAX_RXABSDELAY 0xFFFF
106#define MIN_RXABSDELAY 0 104#define MIN_RXABSDELAY 0
107 105
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1846623c6ae6..1321cb6401cf 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -491,6 +491,8 @@ struct ehea_port {
491 u8 full_duplex; 491 u8 full_duplex;
492 u8 autoneg; 492 u8 autoneg;
493 u8 num_def_qps; 493 u8 num_def_qps;
494 wait_queue_head_t swqe_avail_wq;
495 wait_queue_head_t restart_wq;
494}; 496};
495 497
496struct port_res_cfg { 498struct port_res_cfg {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 043d99013056..a0d117022be6 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -786,6 +786,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
786 struct ehea_port_res *pr = &port->port_res[i]; 786 struct ehea_port_res *pr = &port->port_res[i];
787 pr->sq_restart_flag = 0; 787 pr->sq_restart_flag = 0;
788 } 788 }
789 wake_up(&port->restart_wq);
789} 790}
790 791
791static void check_sqs(struct ehea_port *port) 792static void check_sqs(struct ehea_port *port)
@@ -796,6 +797,7 @@ static void check_sqs(struct ehea_port *port)
796 797
797 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 798 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
798 struct ehea_port_res *pr = &port->port_res[i]; 799 struct ehea_port_res *pr = &port->port_res[i];
800 int ret;
799 k = 0; 801 k = 0;
800 swqe = ehea_get_swqe(pr->qp, &swqe_index); 802 swqe = ehea_get_swqe(pr->qp, &swqe_index);
801 memset(swqe, 0, SWQE_HEADER_SIZE); 803 memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -809,17 +811,16 @@ static void check_sqs(struct ehea_port *port)
809 811
810 ehea_post_swqe(pr->qp, swqe); 812 ehea_post_swqe(pr->qp, swqe);
811 813
812 while (pr->sq_restart_flag == 0) { 814 ret = wait_event_timeout(port->restart_wq,
813 msleep(5); 815 pr->sq_restart_flag == 0,
814 if (++k == 100) { 816 msecs_to_jiffies(100));
815 ehea_error("HW/SW queues out of sync"); 817
816 ehea_schedule_port_reset(pr->port); 818 if (!ret) {
817 return; 819 ehea_error("HW/SW queues out of sync");
818 } 820 ehea_schedule_port_reset(pr->port);
821 return;
819 } 822 }
820 } 823 }
821
822 return;
823} 824}
824 825
825 826
@@ -890,6 +891,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
890 pr->queue_stopped = 0; 891 pr->queue_stopped = 0;
891 } 892 }
892 spin_unlock_irqrestore(&pr->netif_queue, flags); 893 spin_unlock_irqrestore(&pr->netif_queue, flags);
894 wake_up(&pr->port->swqe_avail_wq);
893 895
894 return cqe; 896 return cqe;
895} 897}
@@ -1916,7 +1918,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1916 struct hcp_ehea_port_cb7 *cb7; 1918 struct hcp_ehea_port_cb7 *cb7;
1917 u64 hret; 1919 u64 hret;
1918 1920
1919 if ((enable && port->promisc) || (!enable && !port->promisc)) 1921 if (enable == port->promisc)
1920 return; 1922 return;
1921 1923
1922 cb7 = (void *)get_zeroed_page(GFP_ATOMIC); 1924 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
@@ -2270,7 +2272,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2270 } 2272 }
2271 pr->swqe_id_counter += 1; 2273 pr->swqe_id_counter += 1;
2272 2274
2273 if (port->vgrp && vlan_tx_tag_present(skb)) { 2275 if (vlan_tx_tag_present(skb)) {
2274 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; 2276 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2275 swqe->vlan_tag = vlan_tx_tag_get(skb); 2277 swqe->vlan_tag = vlan_tx_tag_get(skb);
2276 } 2278 }
@@ -2654,6 +2656,9 @@ static int ehea_open(struct net_device *dev)
2654 netif_start_queue(dev); 2656 netif_start_queue(dev);
2655 } 2657 }
2656 2658
2659 init_waitqueue_head(&port->swqe_avail_wq);
2660 init_waitqueue_head(&port->restart_wq);
2661
2657 mutex_unlock(&port->port_lock); 2662 mutex_unlock(&port->port_lock);
2658 2663
2659 return ret; 2664 return ret;
@@ -2726,13 +2731,15 @@ static void ehea_flush_sq(struct ehea_port *port)
2726 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2731 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2727 struct ehea_port_res *pr = &port->port_res[i]; 2732 struct ehea_port_res *pr = &port->port_res[i];
2728 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; 2733 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2729 int k = 0; 2734 int ret;
2730 while (atomic_read(&pr->swqe_avail) < swqe_max) { 2735
2731 msleep(5); 2736 ret = wait_event_timeout(port->swqe_avail_wq,
2732 if (++k == 20) { 2737 atomic_read(&pr->swqe_avail) >= swqe_max,
2733 ehea_error("WARNING: sq not flushed completely"); 2738 msecs_to_jiffies(100));
2734 break; 2739
2735 } 2740 if (!ret) {
2741 ehea_error("WARNING: sq not flushed completely");
2742 break;
2736 } 2743 }
2737 } 2744 }
2738} 2745}
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 75869ed7226f..c91d364c5527 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.2" 35#define DRV_VERSION "1.4.1.6"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
@@ -42,25 +42,6 @@
42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) 42#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) 43#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
44 44
45enum enic_cq_index {
46 ENIC_CQ_RQ,
47 ENIC_CQ_WQ,
48};
49
50enum enic_intx_intr_index {
51 ENIC_INTX_WQ_RQ,
52 ENIC_INTX_ERR,
53 ENIC_INTX_NOTIFY,
54};
55
56enum enic_msix_intr_index {
57 ENIC_MSIX_RQ,
58 ENIC_MSIX_WQ,
59 ENIC_MSIX_ERR,
60 ENIC_MSIX_NOTIFY,
61 ENIC_MSIX_MAX,
62};
63
64struct enic_msix_entry { 45struct enic_msix_entry {
65 int requested; 46 int requested;
66 char devname[IFNAMSIZ]; 47 char devname[IFNAMSIZ];
@@ -91,8 +72,8 @@ struct enic {
91 struct vnic_dev *vdev; 72 struct vnic_dev *vdev;
92 struct timer_list notify_timer; 73 struct timer_list notify_timer;
93 struct work_struct reset; 74 struct work_struct reset;
94 struct msix_entry msix_entry[ENIC_MSIX_MAX]; 75 struct msix_entry msix_entry[ENIC_INTR_MAX];
95 struct enic_msix_entry msix[ENIC_MSIX_MAX]; 76 struct enic_msix_entry msix[ENIC_INTR_MAX];
96 u32 msg_enable; 77 u32 msg_enable;
97 spinlock_t devcmd_lock; 78 spinlock_t devcmd_lock;
98 u8 mac_addr[ETH_ALEN]; 79 u8 mac_addr[ETH_ALEN];
@@ -119,7 +100,7 @@ struct enic {
119 int (*rq_alloc_buf)(struct vnic_rq *rq); 100 int (*rq_alloc_buf)(struct vnic_rq *rq);
120 u64 rq_truncated_pkts; 101 u64 rq_truncated_pkts;
121 u64 rq_bad_fcs; 102 u64 rq_bad_fcs;
122 struct napi_struct napi; 103 struct napi_struct napi[ENIC_RQ_MAX];
123 104
124 /* interrupt resource cache line section */ 105 /* interrupt resource cache line section */
125 ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX]; 106 ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 711077a2e345..a466ef91dd43 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -122,6 +122,51 @@ static int enic_is_dynamic(struct enic *enic)
122 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; 122 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
123} 123}
124 124
125static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
126{
127 return rq;
128}
129
130static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
131{
132 return enic->rq_count + wq;
133}
134
135static inline unsigned int enic_legacy_io_intr(void)
136{
137 return 0;
138}
139
140static inline unsigned int enic_legacy_err_intr(void)
141{
142 return 1;
143}
144
145static inline unsigned int enic_legacy_notify_intr(void)
146{
147 return 2;
148}
149
150static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
151{
152 return rq;
153}
154
155static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
156{
157 return enic->rq_count + wq;
158}
159
160static inline unsigned int enic_msix_err_intr(struct enic *enic)
161{
162 return enic->rq_count + enic->wq_count;
163}
164
165static inline unsigned int enic_msix_notify_intr(struct enic *enic)
166{
167 return enic->rq_count + enic->wq_count + 1;
168}
169
125static int enic_get_settings(struct net_device *netdev, 170static int enic_get_settings(struct net_device *netdev,
126 struct ethtool_cmd *ecmd) 171 struct ethtool_cmd *ecmd)
127{ 172{
@@ -306,6 +351,7 @@ static int enic_set_coalesce(struct net_device *netdev,
306 struct enic *enic = netdev_priv(netdev); 351 struct enic *enic = netdev_priv(netdev);
307 u32 tx_coalesce_usecs; 352 u32 tx_coalesce_usecs;
308 u32 rx_coalesce_usecs; 353 u32 rx_coalesce_usecs;
354 unsigned int i, intr;
309 355
310 tx_coalesce_usecs = min_t(u32, 356 tx_coalesce_usecs = min_t(u32,
311 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), 357 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
@@ -319,7 +365,8 @@ static int enic_set_coalesce(struct net_device *netdev,
319 if (tx_coalesce_usecs != rx_coalesce_usecs) 365 if (tx_coalesce_usecs != rx_coalesce_usecs)
320 return -EINVAL; 366 return -EINVAL;
321 367
322 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ], 368 intr = enic_legacy_io_intr();
369 vnic_intr_coalescing_timer_set(&enic->intr[intr],
323 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); 370 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
324 break; 371 break;
325 case VNIC_DEV_INTR_MODE_MSI: 372 case VNIC_DEV_INTR_MODE_MSI:
@@ -330,10 +377,18 @@ static int enic_set_coalesce(struct net_device *netdev,
330 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); 377 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
331 break; 378 break;
332 case VNIC_DEV_INTR_MODE_MSIX: 379 case VNIC_DEV_INTR_MODE_MSIX:
333 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ], 380 for (i = 0; i < enic->wq_count; i++) {
334 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs)); 381 intr = enic_msix_wq_intr(enic, i);
335 vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ], 382 vnic_intr_coalescing_timer_set(&enic->intr[intr],
336 INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs)); 383 INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
384 }
385
386 for (i = 0; i < enic->rq_count; i++) {
387 intr = enic_msix_rq_intr(enic, i);
388 vnic_intr_coalescing_timer_set(&enic->intr[intr],
389 INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
390 }
391
337 break; 392 break;
338 default: 393 default:
339 break; 394 break;
@@ -482,34 +537,37 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
482{ 537{
483 struct net_device *netdev = data; 538 struct net_device *netdev = data;
484 struct enic *enic = netdev_priv(netdev); 539 struct enic *enic = netdev_priv(netdev);
540 unsigned int io_intr = enic_legacy_io_intr();
541 unsigned int err_intr = enic_legacy_err_intr();
542 unsigned int notify_intr = enic_legacy_notify_intr();
485 u32 pba; 543 u32 pba;
486 544
487 vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]); 545 vnic_intr_mask(&enic->intr[io_intr]);
488 546
489 pba = vnic_intr_legacy_pba(enic->legacy_pba); 547 pba = vnic_intr_legacy_pba(enic->legacy_pba);
490 if (!pba) { 548 if (!pba) {
491 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 549 vnic_intr_unmask(&enic->intr[io_intr]);
492 return IRQ_NONE; /* not our interrupt */ 550 return IRQ_NONE; /* not our interrupt */
493 } 551 }
494 552
495 if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) { 553 if (ENIC_TEST_INTR(pba, notify_intr)) {
496 vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]); 554 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
497 enic_notify_check(enic); 555 enic_notify_check(enic);
498 } 556 }
499 557
500 if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) { 558 if (ENIC_TEST_INTR(pba, err_intr)) {
501 vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]); 559 vnic_intr_return_all_credits(&enic->intr[err_intr]);
502 enic_log_q_error(enic); 560 enic_log_q_error(enic);
503 /* schedule recovery from WQ/RQ error */ 561 /* schedule recovery from WQ/RQ error */
504 schedule_work(&enic->reset); 562 schedule_work(&enic->reset);
505 return IRQ_HANDLED; 563 return IRQ_HANDLED;
506 } 564 }
507 565
508 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { 566 if (ENIC_TEST_INTR(pba, io_intr)) {
509 if (napi_schedule_prep(&enic->napi)) 567 if (napi_schedule_prep(&enic->napi[0]))
510 __napi_schedule(&enic->napi); 568 __napi_schedule(&enic->napi[0]);
511 } else { 569 } else {
512 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 570 vnic_intr_unmask(&enic->intr[io_intr]);
513 } 571 }
514 572
515 return IRQ_HANDLED; 573 return IRQ_HANDLED;
@@ -535,17 +593,17 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
535 * writes). 593 * writes).
536 */ 594 */
537 595
538 napi_schedule(&enic->napi); 596 napi_schedule(&enic->napi[0]);
539 597
540 return IRQ_HANDLED; 598 return IRQ_HANDLED;
541} 599}
542 600
543static irqreturn_t enic_isr_msix_rq(int irq, void *data) 601static irqreturn_t enic_isr_msix_rq(int irq, void *data)
544{ 602{
545 struct enic *enic = data; 603 struct napi_struct *napi = data;
546 604
547 /* schedule NAPI polling for RQ cleanup */ 605 /* schedule NAPI polling for RQ cleanup */
548 napi_schedule(&enic->napi); 606 napi_schedule(napi);
549 607
550 return IRQ_HANDLED; 608 return IRQ_HANDLED;
551} 609}
@@ -553,13 +611,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
553static irqreturn_t enic_isr_msix_wq(int irq, void *data) 611static irqreturn_t enic_isr_msix_wq(int irq, void *data)
554{ 612{
555 struct enic *enic = data; 613 struct enic *enic = data;
614 unsigned int cq = enic_cq_wq(enic, 0);
615 unsigned int intr = enic_msix_wq_intr(enic, 0);
556 unsigned int wq_work_to_do = -1; /* no limit */ 616 unsigned int wq_work_to_do = -1; /* no limit */
557 unsigned int wq_work_done; 617 unsigned int wq_work_done;
558 618
559 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], 619 wq_work_done = vnic_cq_service(&enic->cq[cq],
560 wq_work_to_do, enic_wq_service, NULL); 620 wq_work_to_do, enic_wq_service, NULL);
561 621
562 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ], 622 vnic_intr_return_credits(&enic->intr[intr],
563 wq_work_done, 623 wq_work_done,
564 1 /* unmask intr */, 624 1 /* unmask intr */,
565 1 /* reset intr timer */); 625 1 /* reset intr timer */);
@@ -570,8 +630,9 @@ static irqreturn_t enic_isr_msix_wq(int irq, void *data)
570static irqreturn_t enic_isr_msix_err(int irq, void *data) 630static irqreturn_t enic_isr_msix_err(int irq, void *data)
571{ 631{
572 struct enic *enic = data; 632 struct enic *enic = data;
633 unsigned int intr = enic_msix_err_intr(enic);
573 634
574 vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]); 635 vnic_intr_return_all_credits(&enic->intr[intr]);
575 636
576 enic_log_q_error(enic); 637 enic_log_q_error(enic);
577 638
@@ -584,8 +645,9 @@ static irqreturn_t enic_isr_msix_err(int irq, void *data)
584static irqreturn_t enic_isr_msix_notify(int irq, void *data) 645static irqreturn_t enic_isr_msix_notify(int irq, void *data)
585{ 646{
586 struct enic *enic = data; 647 struct enic *enic = data;
648 unsigned int intr = enic_msix_notify_intr(enic);
587 649
588 vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]); 650 vnic_intr_return_all_credits(&enic->intr[intr]);
589 enic_notify_check(enic); 651 enic_notify_check(enic);
590 652
591 return IRQ_HANDLED; 653 return IRQ_HANDLED;
@@ -743,7 +805,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
743 int vlan_tag_insert = 0; 805 int vlan_tag_insert = 0;
744 int loopback = 0; 806 int loopback = 0;
745 807
746 if (enic->vlan_group && vlan_tx_tag_present(skb)) { 808 if (vlan_tx_tag_present(skb)) {
747 /* VLAN tag from trunking driver */ 809 /* VLAN tag from trunking driver */
748 vlan_tag_insert = 1; 810 vlan_tag_insert = 1;
749 vlan_tag = vlan_tx_tag_get(skb); 811 vlan_tag = vlan_tx_tag_get(skb);
@@ -912,8 +974,19 @@ static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
912static int enic_set_mac_address(struct net_device *netdev, void *p) 974static int enic_set_mac_address(struct net_device *netdev, void *p)
913{ 975{
914 struct sockaddr *saddr = p; 976 struct sockaddr *saddr = p;
977 char *addr = saddr->sa_data;
978 struct enic *enic = netdev_priv(netdev);
979 int err;
980
981 err = enic_dev_del_station_addr(enic);
982 if (err)
983 return err;
915 984
916 return enic_set_mac_addr(netdev, (char *)saddr->sa_data); 985 err = enic_set_mac_addr(netdev, addr);
986 if (err)
987 return err;
988
989 return enic_dev_add_station_addr(enic);
917} 990}
918 991
919static int enic_dev_packet_filter(struct enic *enic, int directed, 992static int enic_dev_packet_filter(struct enic *enic, int directed,
@@ -1409,8 +1482,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1409 (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) { 1482 (vlan_tci & CQ_ENET_RQ_DESC_VLAN_TCI_VLAN_MASK)) {
1410 1483
1411 if (netdev->features & NETIF_F_GRO) 1484 if (netdev->features & NETIF_F_GRO)
1412 vlan_gro_receive(&enic->napi, enic->vlan_group, 1485 vlan_gro_receive(&enic->napi[q_number],
1413 vlan_tci, skb); 1486 enic->vlan_group, vlan_tci, skb);
1414 else 1487 else
1415 vlan_hwaccel_receive_skb(skb, 1488 vlan_hwaccel_receive_skb(skb,
1416 enic->vlan_group, vlan_tci); 1489 enic->vlan_group, vlan_tci);
@@ -1418,12 +1491,11 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1418 } else { 1491 } else {
1419 1492
1420 if (netdev->features & NETIF_F_GRO) 1493 if (netdev->features & NETIF_F_GRO)
1421 napi_gro_receive(&enic->napi, skb); 1494 napi_gro_receive(&enic->napi[q_number], skb);
1422 else 1495 else
1423 netif_receive_skb(skb); 1496 netif_receive_skb(skb);
1424 1497
1425 } 1498 }
1426
1427 } else { 1499 } else {
1428 1500
1429 /* Buffer overflow 1501 /* Buffer overflow
@@ -1447,7 +1519,11 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
1447 1519
1448static int enic_poll(struct napi_struct *napi, int budget) 1520static int enic_poll(struct napi_struct *napi, int budget)
1449{ 1521{
1450 struct enic *enic = container_of(napi, struct enic, napi); 1522 struct net_device *netdev = napi->dev;
1523 struct enic *enic = netdev_priv(netdev);
1524 unsigned int cq_rq = enic_cq_rq(enic, 0);
1525 unsigned int cq_wq = enic_cq_wq(enic, 0);
1526 unsigned int intr = enic_legacy_io_intr();
1451 unsigned int rq_work_to_do = budget; 1527 unsigned int rq_work_to_do = budget;
1452 unsigned int wq_work_to_do = -1; /* no limit */ 1528 unsigned int wq_work_to_do = -1; /* no limit */
1453 unsigned int work_done, rq_work_done, wq_work_done; 1529 unsigned int work_done, rq_work_done, wq_work_done;
@@ -1456,10 +1532,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
1456 /* Service RQ (first) and WQ 1532 /* Service RQ (first) and WQ
1457 */ 1533 */
1458 1534
1459 rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], 1535 rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
1460 rq_work_to_do, enic_rq_service, NULL); 1536 rq_work_to_do, enic_rq_service, NULL);
1461 1537
1462 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], 1538 wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
1463 wq_work_to_do, enic_wq_service, NULL); 1539 wq_work_to_do, enic_wq_service, NULL);
1464 1540
1465 /* Accumulate intr event credits for this polling 1541 /* Accumulate intr event credits for this polling
@@ -1470,7 +1546,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1470 work_done = rq_work_done + wq_work_done; 1546 work_done = rq_work_done + wq_work_done;
1471 1547
1472 if (work_done > 0) 1548 if (work_done > 0)
1473 vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ], 1549 vnic_intr_return_credits(&enic->intr[intr],
1474 work_done, 1550 work_done,
1475 0 /* don't unmask intr */, 1551 0 /* don't unmask intr */,
1476 0 /* don't reset intr timer */); 1552 0 /* don't reset intr timer */);
@@ -1491,7 +1567,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1491 */ 1567 */
1492 1568
1493 napi_complete(napi); 1569 napi_complete(napi);
1494 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 1570 vnic_intr_unmask(&enic->intr[intr]);
1495 } 1571 }
1496 1572
1497 return rq_work_done; 1573 return rq_work_done;
@@ -1499,7 +1575,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
1499 1575
1500static int enic_poll_msix(struct napi_struct *napi, int budget) 1576static int enic_poll_msix(struct napi_struct *napi, int budget)
1501{ 1577{
1502 struct enic *enic = container_of(napi, struct enic, napi); 1578 struct net_device *netdev = napi->dev;
1579 struct enic *enic = netdev_priv(netdev);
1580 unsigned int rq = (napi - &enic->napi[0]);
1581 unsigned int cq = enic_cq_rq(enic, rq);
1582 unsigned int intr = enic_msix_rq_intr(enic, rq);
1503 unsigned int work_to_do = budget; 1583 unsigned int work_to_do = budget;
1504 unsigned int work_done; 1584 unsigned int work_done;
1505 int err; 1585 int err;
@@ -1507,7 +1587,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1507 /* Service RQ 1587 /* Service RQ
1508 */ 1588 */
1509 1589
1510 work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], 1590 work_done = vnic_cq_service(&enic->cq[cq],
1511 work_to_do, enic_rq_service, NULL); 1591 work_to_do, enic_rq_service, NULL);
1512 1592
1513 /* Return intr event credits for this polling 1593 /* Return intr event credits for this polling
@@ -1516,12 +1596,12 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1516 */ 1596 */
1517 1597
1518 if (work_done > 0) 1598 if (work_done > 0)
1519 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], 1599 vnic_intr_return_credits(&enic->intr[intr],
1520 work_done, 1600 work_done,
1521 0 /* don't unmask intr */, 1601 0 /* don't unmask intr */,
1522 0 /* don't reset intr timer */); 1602 0 /* don't reset intr timer */);
1523 1603
1524 err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); 1604 err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
1525 1605
1526 /* Buffer allocation failed. Stay in polling mode 1606 /* Buffer allocation failed. Stay in polling mode
1527 * so we can try to fill the ring again. 1607 * so we can try to fill the ring again.
@@ -1537,7 +1617,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1537 */ 1617 */
1538 1618
1539 napi_complete(napi); 1619 napi_complete(napi);
1540 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1620 vnic_intr_unmask(&enic->intr[intr]);
1541 } 1621 }
1542 1622
1543 return work_done; 1623 return work_done;
@@ -1579,7 +1659,7 @@ static void enic_free_intr(struct enic *enic)
1579static int enic_request_intr(struct enic *enic) 1659static int enic_request_intr(struct enic *enic)
1580{ 1660{
1581 struct net_device *netdev = enic->netdev; 1661 struct net_device *netdev = enic->netdev;
1582 unsigned int i; 1662 unsigned int i, intr;
1583 int err = 0; 1663 int err = 0;
1584 1664
1585 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1665 switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1598,27 +1678,38 @@ static int enic_request_intr(struct enic *enic)
1598 1678
1599 case VNIC_DEV_INTR_MODE_MSIX: 1679 case VNIC_DEV_INTR_MODE_MSIX:
1600 1680
1601 sprintf(enic->msix[ENIC_MSIX_RQ].devname, 1681 for (i = 0; i < enic->rq_count; i++) {
1602 "%.11s-rx-0", netdev->name); 1682 intr = enic_msix_rq_intr(enic, i);
1603 enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq; 1683 sprintf(enic->msix[intr].devname,
1604 enic->msix[ENIC_MSIX_RQ].devid = enic; 1684 "%.11s-rx-%d", netdev->name, i);
1685 enic->msix[intr].isr = enic_isr_msix_rq;
1686 enic->msix[intr].devid = &enic->napi[i];
1687 }
1605 1688
1606 sprintf(enic->msix[ENIC_MSIX_WQ].devname, 1689 for (i = 0; i < enic->wq_count; i++) {
1607 "%.11s-tx-0", netdev->name); 1690 intr = enic_msix_wq_intr(enic, i);
1608 enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq; 1691 sprintf(enic->msix[intr].devname,
1609 enic->msix[ENIC_MSIX_WQ].devid = enic; 1692 "%.11s-tx-%d", netdev->name, i);
1693 enic->msix[intr].isr = enic_isr_msix_wq;
1694 enic->msix[intr].devid = enic;
1695 }
1610 1696
1611 sprintf(enic->msix[ENIC_MSIX_ERR].devname, 1697 intr = enic_msix_err_intr(enic);
1698 sprintf(enic->msix[intr].devname,
1612 "%.11s-err", netdev->name); 1699 "%.11s-err", netdev->name);
1613 enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err; 1700 enic->msix[intr].isr = enic_isr_msix_err;
1614 enic->msix[ENIC_MSIX_ERR].devid = enic; 1701 enic->msix[intr].devid = enic;
1615 1702
1616 sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname, 1703 intr = enic_msix_notify_intr(enic);
1704 sprintf(enic->msix[intr].devname,
1617 "%.11s-notify", netdev->name); 1705 "%.11s-notify", netdev->name);
1618 enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify; 1706 enic->msix[intr].isr = enic_isr_msix_notify;
1619 enic->msix[ENIC_MSIX_NOTIFY].devid = enic; 1707 enic->msix[intr].devid = enic;
1708
1709 for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
1710 enic->msix[i].requested = 0;
1620 1711
1621 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) { 1712 for (i = 0; i < enic->intr_count; i++) {
1622 err = request_irq(enic->msix_entry[i].vector, 1713 err = request_irq(enic->msix_entry[i].vector,
1623 enic->msix[i].isr, 0, 1714 enic->msix[i].isr, 0,
1624 enic->msix[i].devname, 1715 enic->msix[i].devname,
@@ -1664,10 +1755,12 @@ static int enic_dev_notify_set(struct enic *enic)
1664 spin_lock(&enic->devcmd_lock); 1755 spin_lock(&enic->devcmd_lock);
1665 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1756 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1666 case VNIC_DEV_INTR_MODE_INTX: 1757 case VNIC_DEV_INTR_MODE_INTX:
1667 err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY); 1758 err = vnic_dev_notify_set(enic->vdev,
1759 enic_legacy_notify_intr());
1668 break; 1760 break;
1669 case VNIC_DEV_INTR_MODE_MSIX: 1761 case VNIC_DEV_INTR_MODE_MSIX:
1670 err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY); 1762 err = vnic_dev_notify_set(enic->vdev,
1763 enic_msix_notify_intr(enic));
1671 break; 1764 break;
1672 default: 1765 default:
1673 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); 1766 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
@@ -1694,7 +1787,7 @@ static int enic_dev_enable(struct enic *enic)
1694 int err; 1787 int err;
1695 1788
1696 spin_lock(&enic->devcmd_lock); 1789 spin_lock(&enic->devcmd_lock);
1697 err = vnic_dev_enable(enic->vdev); 1790 err = vnic_dev_enable_wait(enic->vdev);
1698 spin_unlock(&enic->devcmd_lock); 1791 spin_unlock(&enic->devcmd_lock);
1699 1792
1700 return err; 1793 return err;
@@ -1762,7 +1855,10 @@ static int enic_open(struct net_device *netdev)
1762 enic_set_multicast_list(netdev); 1855 enic_set_multicast_list(netdev);
1763 1856
1764 netif_wake_queue(netdev); 1857 netif_wake_queue(netdev);
1765 napi_enable(&enic->napi); 1858
1859 for (i = 0; i < enic->rq_count; i++)
1860 napi_enable(&enic->napi[i]);
1861
1766 enic_dev_enable(enic); 1862 enic_dev_enable(enic);
1767 1863
1768 for (i = 0; i < enic->intr_count; i++) 1864 for (i = 0; i < enic->intr_count; i++)
@@ -1797,7 +1893,10 @@ static int enic_stop(struct net_device *netdev)
1797 del_timer_sync(&enic->notify_timer); 1893 del_timer_sync(&enic->notify_timer);
1798 1894
1799 enic_dev_disable(enic); 1895 enic_dev_disable(enic);
1800 napi_disable(&enic->napi); 1896
1897 for (i = 0; i < enic->rq_count; i++)
1898 napi_disable(&enic->napi[i]);
1899
1801 netif_carrier_off(netdev); 1900 netif_carrier_off(netdev);
1802 netif_tx_disable(netdev); 1901 netif_tx_disable(netdev);
1803 enic_dev_del_station_addr(enic); 1902 enic_dev_del_station_addr(enic);
@@ -1857,11 +1956,16 @@ static void enic_poll_controller(struct net_device *netdev)
1857{ 1956{
1858 struct enic *enic = netdev_priv(netdev); 1957 struct enic *enic = netdev_priv(netdev);
1859 struct vnic_dev *vdev = enic->vdev; 1958 struct vnic_dev *vdev = enic->vdev;
1959 unsigned int i, intr;
1860 1960
1861 switch (vnic_dev_get_intr_mode(vdev)) { 1961 switch (vnic_dev_get_intr_mode(vdev)) {
1862 case VNIC_DEV_INTR_MODE_MSIX: 1962 case VNIC_DEV_INTR_MODE_MSIX:
1863 enic_isr_msix_rq(enic->pdev->irq, enic); 1963 for (i = 0; i < enic->rq_count; i++) {
1864 enic_isr_msix_wq(enic->pdev->irq, enic); 1964 intr = enic_msix_rq_intr(enic, i);
1965 enic_isr_msix_rq(enic->msix_entry[intr].vector, enic);
1966 }
1967 intr = enic_msix_wq_intr(enic, i);
1968 enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
1865 break; 1969 break;
1866 case VNIC_DEV_INTR_MODE_MSI: 1970 case VNIC_DEV_INTR_MODE_MSI:
1867 enic_isr_msi(enic->pdev->irq, enic); 1971 enic_isr_msi(enic->pdev->irq, enic);
@@ -1936,19 +2040,73 @@ static int enic_dev_hang_reset(struct enic *enic)
1936 return err; 2040 return err;
1937} 2041}
1938 2042
1939static int enic_set_niccfg(struct enic *enic) 2043static int enic_set_rsskey(struct enic *enic)
2044{
2045 u64 rss_key_buf_pa;
2046 union vnic_rss_key *rss_key_buf_va = NULL;
2047 union vnic_rss_key rss_key = {
2048 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
2049 .key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
2050 .key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
2051 .key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
2052 };
2053 int err;
2054
2055 rss_key_buf_va = pci_alloc_consistent(enic->pdev,
2056 sizeof(union vnic_rss_key), &rss_key_buf_pa);
2057 if (!rss_key_buf_va)
2058 return -ENOMEM;
2059
2060 memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
2061
2062 spin_lock(&enic->devcmd_lock);
2063 err = enic_set_rss_key(enic,
2064 rss_key_buf_pa,
2065 sizeof(union vnic_rss_key));
2066 spin_unlock(&enic->devcmd_lock);
2067
2068 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
2069 rss_key_buf_va, rss_key_buf_pa);
2070
2071 return err;
2072}
2073
2074static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
2075{
2076 u64 rss_cpu_buf_pa;
2077 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
2078 unsigned int i;
2079 int err;
2080
2081 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
2082 sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
2083 if (!rss_cpu_buf_va)
2084 return -ENOMEM;
2085
2086 for (i = 0; i < (1 << rss_hash_bits); i++)
2087 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
2088
2089 spin_lock(&enic->devcmd_lock);
2090 err = enic_set_rss_cpu(enic,
2091 rss_cpu_buf_pa,
2092 sizeof(union vnic_rss_cpu));
2093 spin_unlock(&enic->devcmd_lock);
2094
2095 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
2096 rss_cpu_buf_va, rss_cpu_buf_pa);
2097
2098 return err;
2099}
2100
2101static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
2102 u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
1940{ 2103{
1941 const u8 rss_default_cpu = 0;
1942 const u8 rss_hash_type = 0;
1943 const u8 rss_hash_bits = 0;
1944 const u8 rss_base_cpu = 0;
1945 const u8 rss_enable = 0;
1946 const u8 tso_ipid_split_en = 0; 2104 const u8 tso_ipid_split_en = 0;
1947 const u8 ig_vlan_strip_en = 1; 2105 const u8 ig_vlan_strip_en = 1;
1948 int err; 2106 int err;
1949 2107
1950 /* Enable VLAN tag stripping. RSS not enabled (yet). 2108 /* Enable VLAN tag stripping.
1951 */ 2109 */
1952 2110
1953 spin_lock(&enic->devcmd_lock); 2111 spin_lock(&enic->devcmd_lock);
1954 err = enic_set_nic_cfg(enic, 2112 err = enic_set_nic_cfg(enic,
@@ -1961,6 +2119,35 @@ static int enic_set_niccfg(struct enic *enic)
1961 return err; 2119 return err;
1962} 2120}
1963 2121
2122static int enic_set_rss_nic_cfg(struct enic *enic)
2123{
2124 struct device *dev = enic_get_dev(enic);
2125 const u8 rss_default_cpu = 0;
2126 const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
2127 NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
2128 NIC_CFG_RSS_HASH_TYPE_IPV6 |
2129 NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
2130 const u8 rss_hash_bits = 7;
2131 const u8 rss_base_cpu = 0;
2132 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
2133
2134 if (rss_enable) {
2135 if (!enic_set_rsskey(enic)) {
2136 if (enic_set_rsscpu(enic, rss_hash_bits)) {
2137 rss_enable = 0;
2138 dev_warn(dev, "RSS disabled, "
2139 "Failed to set RSS cpu indirection table.");
2140 }
2141 } else {
2142 rss_enable = 0;
2143 dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
2144 }
2145 }
2146
2147 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
2148 rss_hash_bits, rss_base_cpu, rss_enable);
2149}
2150
1964static int enic_dev_hang_notify(struct enic *enic) 2151static int enic_dev_hang_notify(struct enic *enic)
1965{ 2152{
1966 int err; 2153 int err;
@@ -1972,7 +2159,7 @@ static int enic_dev_hang_notify(struct enic *enic)
1972 return err; 2159 return err;
1973} 2160}
1974 2161
1975int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) 2162static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
1976{ 2163{
1977 int err; 2164 int err;
1978 2165
@@ -1998,7 +2185,7 @@ static void enic_reset(struct work_struct *work)
1998 enic_dev_hang_reset(enic); 2185 enic_dev_hang_reset(enic);
1999 enic_reset_multicast_list(enic); 2186 enic_reset_multicast_list(enic);
2000 enic_init_vnic_resources(enic); 2187 enic_init_vnic_resources(enic);
2001 enic_set_niccfg(enic); 2188 enic_set_rss_nic_cfg(enic);
2002 enic_dev_set_ig_vlan_rewrite_mode(enic); 2189 enic_dev_set_ig_vlan_rewrite_mode(enic);
2003 enic_open(enic->netdev); 2190 enic_open(enic->netdev);
2004 2191
@@ -2007,12 +2194,12 @@ static void enic_reset(struct work_struct *work)
2007 2194
2008static int enic_set_intr_mode(struct enic *enic) 2195static int enic_set_intr_mode(struct enic *enic)
2009{ 2196{
2010 unsigned int n = 1; 2197 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
2011 unsigned int m = 1; 2198 unsigned int m = 1;
2012 unsigned int i; 2199 unsigned int i;
2013 2200
2014 /* Set interrupt mode (INTx, MSI, MSI-X) depending 2201 /* Set interrupt mode (INTx, MSI, MSI-X) depending
2015 * system capabilities. 2202 * on system capabilities.
2016 * 2203 *
2017 * Try MSI-X first 2204 * Try MSI-X first
2018 * 2205 *
@@ -2025,21 +2212,47 @@ static int enic_set_intr_mode(struct enic *enic)
2025 for (i = 0; i < n + m + 2; i++) 2212 for (i = 0; i < n + m + 2; i++)
2026 enic->msix_entry[i].entry = i; 2213 enic->msix_entry[i].entry = i;
2027 2214
2028 if (enic->config.intr_mode < 1 && 2215 /* Use multiple RQs if RSS is enabled
2216 */
2217
2218 if (ENIC_SETTING(enic, RSS) &&
2219 enic->config.intr_mode < 1 &&
2029 enic->rq_count >= n && 2220 enic->rq_count >= n &&
2030 enic->wq_count >= m && 2221 enic->wq_count >= m &&
2031 enic->cq_count >= n + m && 2222 enic->cq_count >= n + m &&
2032 enic->intr_count >= n + m + 2 && 2223 enic->intr_count >= n + m + 2) {
2033 !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2034 2224
2035 enic->rq_count = n; 2225 if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
2036 enic->wq_count = m;
2037 enic->cq_count = n + m;
2038 enic->intr_count = n + m + 2;
2039 2226
2040 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); 2227 enic->rq_count = n;
2228 enic->wq_count = m;
2229 enic->cq_count = n + m;
2230 enic->intr_count = n + m + 2;
2041 2231
2042 return 0; 2232 vnic_dev_set_intr_mode(enic->vdev,
2233 VNIC_DEV_INTR_MODE_MSIX);
2234
2235 return 0;
2236 }
2237 }
2238
2239 if (enic->config.intr_mode < 1 &&
2240 enic->rq_count >= 1 &&
2241 enic->wq_count >= m &&
2242 enic->cq_count >= 1 + m &&
2243 enic->intr_count >= 1 + m + 2) {
2244 if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
2245
2246 enic->rq_count = 1;
2247 enic->wq_count = m;
2248 enic->cq_count = 1 + m;
2249 enic->intr_count = 1 + m + 2;
2250
2251 vnic_dev_set_intr_mode(enic->vdev,
2252 VNIC_DEV_INTR_MODE_MSIX);
2253
2254 return 0;
2255 }
2043 } 2256 }
2044 2257
2045 /* Next try MSI 2258 /* Next try MSI
@@ -2147,17 +2360,22 @@ static const struct net_device_ops enic_netdev_ops = {
2147#endif 2360#endif
2148}; 2361};
2149 2362
2150void enic_dev_deinit(struct enic *enic) 2363static void enic_dev_deinit(struct enic *enic)
2151{ 2364{
2152 netif_napi_del(&enic->napi); 2365 unsigned int i;
2366
2367 for (i = 0; i < enic->rq_count; i++)
2368 netif_napi_del(&enic->napi[i]);
2369
2153 enic_free_vnic_resources(enic); 2370 enic_free_vnic_resources(enic);
2154 enic_clear_intr_mode(enic); 2371 enic_clear_intr_mode(enic);
2155} 2372}
2156 2373
2157int enic_dev_init(struct enic *enic) 2374static int enic_dev_init(struct enic *enic)
2158{ 2375{
2159 struct device *dev = enic_get_dev(enic); 2376 struct device *dev = enic_get_dev(enic);
2160 struct net_device *netdev = enic->netdev; 2377 struct net_device *netdev = enic->netdev;
2378 unsigned int i;
2161 int err; 2379 int err;
2162 2380
2163 /* Get vNIC configuration 2381 /* Get vNIC configuration
@@ -2202,7 +2420,7 @@ int enic_dev_init(struct enic *enic)
2202 goto err_out_free_vnic_resources; 2420 goto err_out_free_vnic_resources;
2203 } 2421 }
2204 2422
2205 err = enic_set_niccfg(enic); 2423 err = enic_set_rss_nic_cfg(enic);
2206 if (err) { 2424 if (err) {
2207 dev_err(dev, "Failed to config nic, aborting\n"); 2425 dev_err(dev, "Failed to config nic, aborting\n");
2208 goto err_out_free_vnic_resources; 2426 goto err_out_free_vnic_resources;
@@ -2210,17 +2428,19 @@ int enic_dev_init(struct enic *enic)
2210 2428
2211 err = enic_dev_set_ig_vlan_rewrite_mode(enic); 2429 err = enic_dev_set_ig_vlan_rewrite_mode(enic);
2212 if (err) { 2430 if (err) {
2213 netdev_err(netdev, 2431 dev_err(dev,
2214 "Failed to set ingress vlan rewrite mode, aborting.\n"); 2432 "Failed to set ingress vlan rewrite mode, aborting.\n");
2215 goto err_out_free_vnic_resources; 2433 goto err_out_free_vnic_resources;
2216 } 2434 }
2217 2435
2218 switch (vnic_dev_get_intr_mode(enic->vdev)) { 2436 switch (vnic_dev_get_intr_mode(enic->vdev)) {
2219 default: 2437 default:
2220 netif_napi_add(netdev, &enic->napi, enic_poll, 64); 2438 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
2221 break; 2439 break;
2222 case VNIC_DEV_INTR_MODE_MSIX: 2440 case VNIC_DEV_INTR_MODE_MSIX:
2223 netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64); 2441 for (i = 0; i < enic->rq_count; i++)
2442 netif_napi_add(netdev, &enic->napi[i],
2443 enic_poll_msix, 64);
2224 break; 2444 break;
2225 } 2445 }
2226 2446
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 29ede8a17a2c..f111a37419ce 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -94,13 +94,14 @@ int enic_get_vnic_config(struct enic *enic)
94 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX), 94 INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
95 c->intr_timer_usec); 95 c->intr_timer_usec);
96 96
97 dev_info(enic_get_dev(enic), "vNIC MAC addr %pM wq/rq %d/%d\n", 97 dev_info(enic_get_dev(enic),
98 enic->mac_addr, c->wq_desc_count, c->rq_desc_count); 98 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
99 dev_info(enic_get_dev(enic), "vNIC mtu %d csum tx/rx %d/%d " 99 enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
100 "tso/lro %d/%d intr timer %d usec\n", 100 dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d "
101 c->mtu, ENIC_SETTING(enic, TXCSUM), 101 "tso/lro %d/%d intr timer %d usec rss %d\n",
102 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO), 102 ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM),
103 ENIC_SETTING(enic, LRO), c->intr_timer_usec); 103 ENIC_SETTING(enic, TSO), ENIC_SETTING(enic, LRO),
104 c->intr_timer_usec, ENIC_SETTING(enic, RSS));
104 105
105 return 0; 106 return 0;
106} 107}
@@ -181,18 +182,11 @@ void enic_free_vnic_resources(struct enic *enic)
181 182
182void enic_get_res_counts(struct enic *enic) 183void enic_get_res_counts(struct enic *enic)
183{ 184{
184 enic->wq_count = min_t(int, 185 enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
185 vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ), 186 enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
186 ENIC_WQ_MAX); 187 enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
187 enic->rq_count = min_t(int, 188 enic->intr_count = vnic_dev_get_res_count(enic->vdev,
188 vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ), 189 RES_TYPE_INTR_CTRL);
189 ENIC_RQ_MAX);
190 enic->cq_count = min_t(int,
191 vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ),
192 ENIC_CQ_MAX);
193 enic->intr_count = min_t(int,
194 vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL),
195 ENIC_INTR_MAX);
196 190
197 dev_info(enic_get_dev(enic), 191 dev_info(enic_get_dev(enic),
198 "vNIC resources avail: wq %d rq %d cq %d intr %d\n", 192 "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 83bd172c356c..9a103d9ef9e2 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -30,7 +30,7 @@
30#define ENIC_MIN_RQ_DESCS 64 30#define ENIC_MIN_RQ_DESCS 64
31#define ENIC_MAX_RQ_DESCS 4096 31#define ENIC_MAX_RQ_DESCS 4096
32 32
33#define ENIC_MIN_MTU 576 /* minimum for IPv4 */ 33#define ENIC_MIN_MTU 68
34#define ENIC_MAX_MTU 9000 34#define ENIC_MAX_MTU 9000
35 35
36#define ENIC_MULTICAST_PERFECT_FILTERS 32 36#define ENIC_MULTICAST_PERFECT_FILTERS 32
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 08d5d42da260..fb35d8b17668 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -186,22 +186,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
186 } 186 }
187} 187}
188 188
189dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev, 189static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
190 enum vnic_res_type type, unsigned int index)
191{
192 switch (type) {
193 case RES_TYPE_WQ:
194 case RES_TYPE_RQ:
195 case RES_TYPE_CQ:
196 case RES_TYPE_INTR_CTRL:
197 return vdev->res[type].bus_addr +
198 index * VNIC_RES_STRIDE;
199 default:
200 return vdev->res[type].bus_addr;
201 }
202}
203
204unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
205 unsigned int desc_count, unsigned int desc_size) 190 unsigned int desc_count, unsigned int desc_size)
206{ 191{
207 /* The base address of the desc rings must be 512 byte aligned. 192 /* The base address of the desc rings must be 512 byte aligned.
@@ -384,18 +369,6 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
384 return err; 369 return err;
385} 370}
386 371
387void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf)
388{
389 vdev->proxy = PROXY_BY_BDF;
390 vdev->proxy_index = bdf;
391}
392
393void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
394{
395 vdev->proxy = PROXY_NONE;
396 vdev->proxy_index = 0;
397}
398
399int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, 372int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
400 u64 *a0, u64 *a1, int wait) 373 u64 *a0, u64 *a1, int wait)
401{ 374{
@@ -488,13 +461,6 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
488 return err; 461 return err;
489} 462}
490 463
491int vnic_dev_stats_clear(struct vnic_dev *vdev)
492{
493 u64 a0 = 0, a1 = 0;
494 int wait = 1000;
495 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
496}
497
498int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats) 464int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
499{ 465{
500 u64 a0, a1; 466 u64 a0, a1;
@@ -521,13 +487,6 @@ int vnic_dev_close(struct vnic_dev *vdev)
521 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait); 487 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
522} 488}
523 489
524int vnic_dev_enable(struct vnic_dev *vdev)
525{
526 u64 a0 = 0, a1 = 0;
527 int wait = 1000;
528 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
529}
530
531int vnic_dev_enable_wait(struct vnic_dev *vdev) 490int vnic_dev_enable_wait(struct vnic_dev *vdev)
532{ 491{
533 u64 a0 = 0, a1 = 0; 492 u64 a0 = 0, a1 = 0;
@@ -572,14 +531,14 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
572 return 0; 531 return 0;
573} 532}
574 533
575int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg) 534static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
576{ 535{
577 u64 a0 = (u32)arg, a1 = 0; 536 u64 a0 = (u32)arg, a1 = 0;
578 int wait = 1000; 537 int wait = 1000;
579 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait); 538 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
580} 539}
581 540
582int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) 541static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
583{ 542{
584 u64 a0 = 0, a1 = 0; 543 u64 a0 = 0, a1 = 0;
585 int wait = 1000; 544 int wait = 1000;
@@ -680,26 +639,6 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
680 return err; 639 return err;
681} 640}
682 641
683int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
684 int multicast, int broadcast, int promisc, int allmulti)
685{
686 u64 a0, a1 = 0;
687 int wait = 1000;
688 int err;
689
690 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
691 (multicast ? CMD_PFILTER_MULTICAST : 0) |
692 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
693 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
694 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
695
696 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER_ALL, &a0, &a1, wait);
697 if (err)
698 pr_err("Can't set packet filter\n");
699
700 return err;
701}
702
703int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) 642int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
704{ 643{
705 u64 a0 = 0, a1 = 0; 644 u64 a0 = 0, a1 = 0;
@@ -748,20 +687,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
748 return err; 687 return err;
749} 688}
750 689
751int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) 690static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
752{
753 u64 a0 = intr, a1 = 0;
754 int wait = 1000;
755 int err;
756
757 err = vnic_dev_cmd(vdev, CMD_IAR, &a0, &a1, wait);
758 if (err)
759 pr_err("Failed to raise INTR[%d], err %d\n", intr, err);
760
761 return err;
762}
763
764int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
765 void *notify_addr, dma_addr_t notify_pa, u16 intr) 691 void *notify_addr, dma_addr_t notify_pa, u16 intr)
766{ 692{
767 u64 a0, a1; 693 u64 a0, a1;
@@ -800,7 +726,7 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
800 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr); 726 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
801} 727}
802 728
803int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) 729static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
804{ 730{
805 u64 a0, a1; 731 u64 a0, a1;
806 int wait = 1000; 732 int wait = 1000;
@@ -954,30 +880,6 @@ u32 vnic_dev_mtu(struct vnic_dev *vdev)
954 return vdev->notify_copy.mtu; 880 return vdev->notify_copy.mtu;
955} 881}
956 882
957u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
958{
959 if (!vnic_dev_notify_ready(vdev))
960 return 0;
961
962 return vdev->notify_copy.link_down_cnt;
963}
964
965u32 vnic_dev_notify_status(struct vnic_dev *vdev)
966{
967 if (!vnic_dev_notify_ready(vdev))
968 return 0;
969
970 return vdev->notify_copy.status;
971}
972
973u32 vnic_dev_uif(struct vnic_dev *vdev)
974{
975 if (!vnic_dev_notify_ready(vdev))
976 return 0;
977
978 return vdev->notify_copy.uif;
979}
980
981void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 883void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
982 enum vnic_dev_intr_mode intr_mode) 884 enum vnic_dev_intr_mode intr_mode)
983{ 885{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 3a61873138b6..05f9a24cd459 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -84,10 +84,6 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
84 enum vnic_res_type type); 84 enum vnic_res_type type);
85void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, 85void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
86 unsigned int index); 86 unsigned int index);
87dma_addr_t vnic_dev_get_res_bus_addr(struct vnic_dev *vdev,
88 enum vnic_res_type type, unsigned int index);
89unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
90 unsigned int desc_count, unsigned int desc_size);
91void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring); 87void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
92int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, 88int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
93 unsigned int desc_count, unsigned int desc_size); 89 unsigned int desc_count, unsigned int desc_size);
@@ -95,39 +91,26 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
95 struct vnic_dev_ring *ring); 91 struct vnic_dev_ring *ring);
96int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, 92int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
97 u64 *a0, u64 *a1, int wait); 93 u64 *a0, u64 *a1, int wait);
98void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
99void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
100int vnic_dev_fw_info(struct vnic_dev *vdev, 94int vnic_dev_fw_info(struct vnic_dev *vdev,
101 struct vnic_devcmd_fw_info **fw_info); 95 struct vnic_devcmd_fw_info **fw_info);
102int vnic_dev_hw_version(struct vnic_dev *vdev, 96int vnic_dev_hw_version(struct vnic_dev *vdev,
103 enum vnic_dev_hw_version *hw_ver); 97 enum vnic_dev_hw_version *hw_ver);
104int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, 98int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
105 void *value); 99 void *value);
106int vnic_dev_stats_clear(struct vnic_dev *vdev);
107int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); 100int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
108int vnic_dev_hang_notify(struct vnic_dev *vdev); 101int vnic_dev_hang_notify(struct vnic_dev *vdev);
109int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 102int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
110 int broadcast, int promisc, int allmulti); 103 int broadcast, int promisc, int allmulti);
111int vnic_dev_packet_filter_all(struct vnic_dev *vdev, int directed,
112 int multicast, int broadcast, int promisc, int allmulti);
113int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); 104int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
114int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); 105int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
115int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); 106int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
116int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
117int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
118 void *notify_addr, dma_addr_t notify_pa, u16 intr);
119int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); 107int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
120int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
121int vnic_dev_notify_unset(struct vnic_dev *vdev); 108int vnic_dev_notify_unset(struct vnic_dev *vdev);
122int vnic_dev_link_status(struct vnic_dev *vdev); 109int vnic_dev_link_status(struct vnic_dev *vdev);
123u32 vnic_dev_port_speed(struct vnic_dev *vdev); 110u32 vnic_dev_port_speed(struct vnic_dev *vdev);
124u32 vnic_dev_msg_lvl(struct vnic_dev *vdev); 111u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
125u32 vnic_dev_mtu(struct vnic_dev *vdev); 112u32 vnic_dev_mtu(struct vnic_dev *vdev);
126u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev);
127u32 vnic_dev_notify_status(struct vnic_dev *vdev);
128u32 vnic_dev_uif(struct vnic_dev *vdev);
129int vnic_dev_close(struct vnic_dev *vdev); 113int vnic_dev_close(struct vnic_dev *vdev);
130int vnic_dev_enable(struct vnic_dev *vdev);
131int vnic_dev_enable_wait(struct vnic_dev *vdev); 114int vnic_dev_enable_wait(struct vnic_dev *vdev);
132int vnic_dev_disable(struct vnic_dev *vdev); 115int vnic_dev_disable(struct vnic_dev *vdev);
133int vnic_dev_open(struct vnic_dev *vdev, int arg); 116int vnic_dev_open(struct vnic_dev *vdev, int arg);
@@ -136,8 +119,6 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg);
136int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err); 119int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
137int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len); 120int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
138int vnic_dev_deinit(struct vnic_dev *vdev); 121int vnic_dev_deinit(struct vnic_dev *vdev);
139int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
140int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
141int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg); 122int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
142int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done); 123int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
143void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 124void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 52ab61af2750..3873771d75cc 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -65,8 +65,3 @@ void vnic_intr_clean(struct vnic_intr *intr)
65{ 65{
66 iowrite32(0, &intr->ctrl->int_credits); 66 iowrite32(0, &intr->ctrl->int_credits);
67} 67}
68
69void vnic_intr_raise(struct vnic_intr *intr)
70{
71 vnic_dev_raise_intr(intr->vdev, (u16)intr->index);
72}
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index b236d7cbc137..34105e0951a5 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -115,7 +115,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
115 return 0; 115 return 0;
116} 116}
117 117
118void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, 118static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
119 unsigned int fetch_index, unsigned int posted_index, 119 unsigned int fetch_index, unsigned int posted_index,
120 unsigned int error_interrupt_enable, 120 unsigned int error_interrupt_enable,
121 unsigned int error_interrupt_offset) 121 unsigned int error_interrupt_offset)
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 2dc48f91abf7..37f08de2454a 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -143,7 +143,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
143 143
144static inline int vnic_rq_posting_soon(struct vnic_rq *rq) 144static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
145{ 145{
146 return ((rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0); 146 return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
147} 147}
148 148
149static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) 149static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
@@ -202,10 +202,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
202void vnic_rq_free(struct vnic_rq *rq); 202void vnic_rq_free(struct vnic_rq *rq);
203int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, 203int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
204 unsigned int desc_count, unsigned int desc_size); 204 unsigned int desc_count, unsigned int desc_size);
205void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
206 unsigned int fetch_index, unsigned int posted_index,
207 unsigned int error_interrupt_enable,
208 unsigned int error_interrupt_offset);
209void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, 205void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
210 unsigned int error_interrupt_enable, 206 unsigned int error_interrupt_enable,
211 unsigned int error_interrupt_offset); 207 unsigned int error_interrupt_offset);
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
index f62d18719629..fa421baf45b8 100644
--- a/drivers/net/enic/vnic_rss.h
+++ b/drivers/net/enic/vnic_rss.h
@@ -37,9 +37,4 @@ union vnic_rss_cpu {
37 u64 raw[32]; 37 u64 raw[32];
38}; 38};
39 39
40void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
41void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
42void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
43void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
44
45#endif /* _VNIC_RSS_H_ */ 40#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index 4b2a6c6a569b..df61bd932ea6 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -115,7 +115,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
115 return 0; 115 return 0;
116} 116}
117 117
118void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, 118static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
119 unsigned int fetch_index, unsigned int posted_index, 119 unsigned int fetch_index, unsigned int posted_index,
120 unsigned int error_interrupt_enable, 120 unsigned int error_interrupt_enable,
121 unsigned int error_interrupt_offset) 121 unsigned int error_interrupt_offset)
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 94ac4621acc5..7dd937ac11c2 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -153,10 +153,6 @@ static inline void vnic_wq_service(struct vnic_wq *wq,
153void vnic_wq_free(struct vnic_wq *wq); 153void vnic_wq_free(struct vnic_wq *wq);
154int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, 154int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
155 unsigned int desc_count, unsigned int desc_size); 155 unsigned int desc_count, unsigned int desc_size);
156void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
157 unsigned int fetch_index, unsigned int posted_index,
158 unsigned int error_interrupt_enable,
159 unsigned int error_interrupt_offset);
160void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, 156void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
161 unsigned int error_interrupt_enable, 157 unsigned int error_interrupt_enable,
162 unsigned int error_interrupt_offset); 158 unsigned int error_interrupt_offset);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index dda2c7944da9..0cb1cf9cf4b0 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
555 equalizer_t *eql; 555 equalizer_t *eql;
556 master_config_t mc; 556 master_config_t mc;
557 557
558 memset(&mc, 0, sizeof(master_config_t));
559
558 if (eql_is_master(dev)) { 560 if (eql_is_master(dev)) {
559 eql = netdev_priv(dev); 561 eql = netdev_priv(dev);
560 mc.max_slaves = eql->max_slaves; 562 mc.max_slaves = eql->max_slaves;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 10e39f2b31c3..fb717be511f6 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -637,7 +637,9 @@ static void eth16i_initialize(struct net_device *dev, int boot)
637 637
638 /* Set interface port type */ 638 /* Set interface port type */
639 if(boot) { 639 if(boot) {
640 char *porttype[] = {"BNC", "DIX", "TP", "AUTO", "FROM_EPROM" }; 640 static const char * const porttype[] = {
641 "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
642 };
641 643
642 switch(dev->if_port) 644 switch(dev->if_port)
643 { 645 {
@@ -794,7 +796,7 @@ static int eth16i_receive_probe_packet(int ioaddr)
794 796
795 if(eth16i_debug > 1) 797 if(eth16i_debug > 1)
796 printk(KERN_DEBUG "RECEIVE_PACKET\n"); 798 printk(KERN_DEBUG "RECEIVE_PACKET\n");
797 return(0); /* Found receive packet */ 799 return 0; /* Found receive packet */
798 } 800 }
799 } 801 }
800 802
@@ -803,7 +805,7 @@ static int eth16i_receive_probe_packet(int ioaddr)
803 printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG)); 805 printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
804 } 806 }
805 807
806 return(0); /* Return success */ 808 return 0; /* Return success */
807} 809}
808 810
809#if 0 811#if 0
@@ -839,7 +841,7 @@ static int __init eth16i_get_irq(int ioaddr)
839 841
840 if( ioaddr < 0x1000) { 842 if( ioaddr < 0x1000) {
841 cbyte = inb(ioaddr + JUMPERLESS_CONFIG); 843 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
842 return( eth16i_irqmap[ ((cbyte & 0xC0) >> 6) ] ); 844 return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
843 } else { /* Oh..the card is EISA so method getting IRQ different */ 845 } else { /* Oh..the card is EISA so method getting IRQ different */
844 unsigned short index = 0; 846 unsigned short index = 0;
845 cbyte = inb(ioaddr + EISA_IRQ_REG); 847 cbyte = inb(ioaddr + EISA_IRQ_REG);
@@ -847,7 +849,7 @@ static int __init eth16i_get_irq(int ioaddr)
847 cbyte = cbyte >> 1; 849 cbyte = cbyte >> 1;
848 index++; 850 index++;
849 } 851 }
850 return( eth32i_irqmap[ index ] ); 852 return eth32i_irqmap[index];
851 } 853 }
852} 854}
853 855
@@ -907,7 +909,7 @@ static int eth16i_read_eeprom(int ioaddr, int offset)
907 data = eth16i_read_eeprom_word(ioaddr); 909 data = eth16i_read_eeprom_word(ioaddr);
908 outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG); 910 outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
909 911
910 return(data); 912 return data;
911} 913}
912 914
913static int eth16i_read_eeprom_word(int ioaddr) 915static int eth16i_read_eeprom_word(int ioaddr)
@@ -926,7 +928,7 @@ static int eth16i_read_eeprom_word(int ioaddr)
926 eeprom_slow_io(); 928 eeprom_slow_io();
927 } 929 }
928 930
929 return(data); 931 return data;
930} 932}
931 933
932static void eth16i_eeprom_cmd(int ioaddr, unsigned char command) 934static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 768b840aeb6b..e83f67d22fe3 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1311,6 +1311,9 @@ fec_probe(struct platform_device *pdev)
1311 if (ret) 1311 if (ret)
1312 goto failed_mii_init; 1312 goto failed_mii_init;
1313 1313
1314 /* Carrier starts down, phylib will bring it up */
1315 netif_carrier_off(ndev);
1316
1314 ret = register_netdev(ndev); 1317 ret = register_netdev(ndev);
1315 if (ret) 1318 if (ret)
1316 goto failed_register; 1319 goto failed_register;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 6a44fe411589..0fa1776563a3 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2321,14 +2321,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2321 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0; 2321 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2322 2322
2323 /* vlan tag */ 2323 /* vlan tag */
2324 if (likely(!np->vlangrp)) { 2324 if (vlan_tx_tag_present(skb))
2325 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2326 vlan_tx_tag_get(skb));
2327 else
2325 start_tx->txvlan = 0; 2328 start_tx->txvlan = 0;
2326 } else {
2327 if (vlan_tx_tag_present(skb))
2328 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
2329 else
2330 start_tx->txvlan = 0;
2331 }
2332 2329
2333 spin_lock_irqsave(&np->lock, flags); 2330 spin_lock_irqsave(&np->lock, flags);
2334 2331
@@ -4620,7 +4617,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4620static u32 nv_get_rx_csum(struct net_device *dev) 4617static u32 nv_get_rx_csum(struct net_device *dev)
4621{ 4618{
4622 struct fe_priv *np = netdev_priv(dev); 4619 struct fe_priv *np = netdev_priv(dev);
4623 return (np->rx_csum) != 0; 4620 return np->rx_csum != 0;
4624} 4621}
4625 4622
4626static int nv_set_rx_csum(struct net_device *dev, u32 data) 4623static int nv_set_rx_csum(struct net_device *dev, u32 data)
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index d4bf91aac25f..8d3a2ccbc953 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -125,7 +125,7 @@ int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
125 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 125 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
126 126
127 /* Write to the local MII regs */ 127 /* Write to the local MII regs */
128 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value)); 128 return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
129} 129}
130 130
131/* 131/*
@@ -137,7 +137,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
137 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus); 137 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
138 138
139 /* Read the local MII regs */ 139 /* Read the local MII regs */
140 return(fsl_pq_local_mdio_read(regs, mii_id, regnum)); 140 return fsl_pq_local_mdio_read(regs, mii_id, regnum);
141} 141}
142 142
143/* Reset the MIIM registers, and wait for the bus to free */ 143/* Reset the MIIM registers, and wait for the bus to free */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f30adbf86bb2..4c4cc80ec0a1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -654,9 +654,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
654 priv->node = ofdev->dev.of_node; 654 priv->node = ofdev->dev.of_node;
655 priv->ndev = dev; 655 priv->ndev = dev;
656 656
657 dev->num_tx_queues = num_tx_qs;
658 dev->real_num_tx_queues = num_tx_qs;
659 priv->num_tx_queues = num_tx_qs; 657 priv->num_tx_queues = num_tx_qs;
658 netif_set_real_num_rx_queues(dev, num_rx_qs);
660 priv->num_rx_queues = num_rx_qs; 659 priv->num_rx_queues = num_rx_qs;
661 priv->num_grps = 0x0; 660 priv->num_grps = 0x0;
662 661
@@ -2076,7 +2075,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2076 2075
2077 /* make space for additional header when fcb is needed */ 2076 /* make space for additional header when fcb is needed */
2078 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 2077 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
2079 (priv->vlgrp && vlan_tx_tag_present(skb)) || 2078 vlan_tx_tag_present(skb) ||
2080 unlikely(do_tstamp)) && 2079 unlikely(do_tstamp)) &&
2081 (skb_headroom(skb) < GMAC_FCB_LEN)) { 2080 (skb_headroom(skb) < GMAC_FCB_LEN)) {
2082 struct sk_buff *skb_new; 2081 struct sk_buff *skb_new;
@@ -2162,7 +2161,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2162 gfar_tx_checksum(skb, fcb); 2161 gfar_tx_checksum(skb, fcb);
2163 } 2162 }
2164 2163
2165 if (priv->vlgrp && vlan_tx_tag_present(skb)) { 2164 if (vlan_tx_tag_present(skb)) {
2166 if (unlikely(NULL == fcb)) { 2165 if (unlikely(NULL == fcb)) {
2167 fcb = gfar_add_fcb(skb); 2166 fcb = gfar_add_fcb(skb);
2168 lstatus |= BD_LFLAG(TXBD_TOE); 2167 lstatus |= BD_LFLAG(TXBD_TOE);
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 9bda023c0235..ae8e5d3c6c1f 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -254,7 +254,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
254 254
255 /* Make sure we return a number greater than 0 255 /* Make sure we return a number greater than 0
256 * if usecs > 0 */ 256 * if usecs > 0 */
257 return ((usecs * 1000 + count - 1) / count); 257 return (usecs * 1000 + count - 1) / count;
258} 258}
259 259
260/* Convert ethernet clock ticks to microseconds */ 260/* Convert ethernet clock ticks to microseconds */
@@ -278,7 +278,7 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
278 278
279 /* Make sure we return a number greater than 0 */ 279 /* Make sure we return a number greater than 0 */
280 /* if ticks is > 0 */ 280 /* if ticks is > 0 */
281 return ((ticks * count) / 1000); 281 return (ticks * count) / 1000;
282} 282}
283 283
284/* Get the coalescing parameters, and put them in the cvals 284/* Get the coalescing parameters, and put them in the cvals
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 14f01d156db9..ac1d323c5eb5 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -168,7 +168,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
168 168
169static inline int dev_is_ethdev(struct net_device *dev) 169static inline int dev_is_ethdev(struct net_device *dev)
170{ 170{
171 return (dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5)); 171 return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
172} 172}
173 173
174/* ------------------------------------------------------------------------ */ 174/* ------------------------------------------------------------------------ */
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index b8bdf9d51cd4..5b37579e84b7 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -110,7 +110,7 @@ static int calc_crc_ccitt(const unsigned char *buf, int cnt)
110 for (; cnt > 0; cnt--) 110 for (; cnt > 0; cnt--)
111 crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff]; 111 crc = (crc >> 8) ^ crc_ccitt_table[(crc ^ *buf++) & 0xff];
112 crc ^= 0xffff; 112 crc ^= 0xffff;
113 return (crc & 0xffff); 113 return crc & 0xffff;
114} 114}
115#endif 115#endif
116 116
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 095b17ecf609..8e2c4601b5f5 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1312,7 +1312,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
1312 for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++) 1312 for (p = (ringptr->pdl); p < (ringptr->pdl + 5); p++)
1313 printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p); 1313 printk("hp100: %s: Adr 0x%.8x = 0x%.8x\n", dev->name, (u_int) p, (u_int) * p);
1314#endif 1314#endif
1315 return (1); 1315 return 1;
1316 } 1316 }
1317 /* else: */ 1317 /* else: */
1318 /* alloc_skb failed (no memory) -> still can receive the header 1318 /* alloc_skb failed (no memory) -> still can receive the header
@@ -1325,7 +1325,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
1325 1325
1326 ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */ 1326 ringptr->pdl[0] = 0x00010000; /* PDH: Count=1 Fragment */
1327 1327
1328 return (0); 1328 return 0;
1329} 1329}
1330 1330
1331/* 1331/*
@@ -2752,7 +2752,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2752 hp100_outw(HP100_MISC_ERROR, IRQ_STATUS); 2752 hp100_outw(HP100_MISC_ERROR, IRQ_STATUS);
2753 2753
2754 if (val & HP100_LINK_UP_ST) 2754 if (val & HP100_LINK_UP_ST)
2755 return (0); /* login was ok */ 2755 return 0; /* login was ok */
2756 else { 2756 else {
2757 printk("hp100: %s: Training failed.\n", dev->name); 2757 printk("hp100: %s: Training failed.\n", dev->name);
2758 hp100_down_vg_link(dev); 2758 hp100_down_vg_link(dev);
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3506fd6ad726..385dc3204cb7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2095,11 +2095,11 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2095 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) { 2095 if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2096 hdr->version = EMAC4_ETHTOOL_REGS_VER; 2096 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2097 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev)); 2097 memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
2098 return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev)); 2098 return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
2099 } else { 2099 } else {
2100 hdr->version = EMAC_ETHTOOL_REGS_VER; 2100 hdr->version = EMAC_ETHTOOL_REGS_VER;
2101 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev)); 2101 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
2102 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev)); 2102 return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
2103 } 2103 }
2104} 2104}
2105 2105
@@ -2293,7 +2293,7 @@ static int __devinit emac_check_deps(struct emac_instance *dev,
2293 if (deps[i].drvdata != NULL) 2293 if (deps[i].drvdata != NULL)
2294 there++; 2294 there++;
2295 } 2295 }
2296 return (there == EMAC_DEP_COUNT); 2296 return there == EMAC_DEP_COUNT;
2297} 2297}
2298 2298
2299static void emac_put_deps(struct emac_instance *dev) 2299static void emac_put_deps(struct emac_instance *dev)
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2928 if (dev->emac_irq != NO_IRQ) 2928 if (dev->emac_irq != NO_IRQ)
2929 irq_dispose_mapping(dev->emac_irq); 2929 irq_dispose_mapping(dev->emac_irq);
2930 err_free: 2930 err_free:
2931 kfree(ndev); 2931 free_netdev(ndev);
2932 err_gone: 2932 err_gone:
2933 /* if we were on the bootlist, remove us as we won't show up and 2933 /* if we were on the bootlist, remove us as we won't show up and
2934 * wake up all waiters to notify them in case they were waiting 2934 * wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
2971 if (dev->emac_irq != NO_IRQ) 2971 if (dev->emac_irq != NO_IRQ)
2972 irq_dispose_mapping(dev->emac_irq); 2972 irq_dispose_mapping(dev->emac_irq);
2973 2973
2974 kfree(dev->ndev); 2974 free_netdev(dev->ndev);
2975 2975
2976 return 0; 2976 return 0;
2977} 2977}
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 9e37e3d9c51d..4fec0844d59d 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -410,7 +410,7 @@ static inline u32 *emac_xaht_base(struct emac_instance *dev)
410 else 410 else
411 offset = offsetof(struct emac_regs, u0.emac4.iaht1); 411 offset = offsetof(struct emac_regs, u0.emac4.iaht1);
412 412
413 return ((u32 *)((ptrdiff_t)p + offset)); 413 return (u32 *)((ptrdiff_t)p + offset);
414} 414}
415 415
416static inline u32 *emac_gaht_base(struct emac_instance *dev) 416static inline u32 *emac_gaht_base(struct emac_instance *dev)
@@ -418,7 +418,7 @@ static inline u32 *emac_gaht_base(struct emac_instance *dev)
418 /* GAHT registers always come after an identical number of 418 /* GAHT registers always come after an identical number of
419 * IAHT registers. 419 * IAHT registers.
420 */ 420 */
421 return (emac_xaht_base(dev) + EMAC_XAHT_REGS(dev)); 421 return emac_xaht_base(dev) + EMAC_XAHT_REGS(dev);
422} 422}
423 423
424static inline u32 *emac_iaht_base(struct emac_instance *dev) 424static inline u32 *emac_iaht_base(struct emac_instance *dev)
@@ -426,7 +426,7 @@ static inline u32 *emac_iaht_base(struct emac_instance *dev)
426 /* IAHT registers always come before an identical number of 426 /* IAHT registers always come before an identical number of
427 * GAHT registers. 427 * GAHT registers.
428 */ 428 */
429 return (emac_xaht_base(dev)); 429 return emac_xaht_base(dev);
430} 430}
431 431
432/* Ethtool get_regs complex data. 432/* Ethtool get_regs complex data.
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index b3e157ed6776..c454b45ca7ec 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -546,9 +546,8 @@ static int ibmveth_open(struct net_device *netdev)
546 if (!adapter->buffer_list_addr || !adapter->filter_list_addr) { 546 if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
547 netdev_err(netdev, "unable to allocate filter or buffer list " 547 netdev_err(netdev, "unable to allocate filter or buffer list "
548 "pages\n"); 548 "pages\n");
549 ibmveth_cleanup(adapter); 549 rc = -ENOMEM;
550 napi_disable(&adapter->napi); 550 goto err_out;
551 return -ENOMEM;
552 } 551 }
553 552
554 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * 553 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
@@ -558,9 +557,8 @@ static int ibmveth_open(struct net_device *netdev)
558 557
559 if (!adapter->rx_queue.queue_addr) { 558 if (!adapter->rx_queue.queue_addr) {
560 netdev_err(netdev, "unable to allocate rx queue pages\n"); 559 netdev_err(netdev, "unable to allocate rx queue pages\n");
561 ibmveth_cleanup(adapter); 560 rc = -ENOMEM;
562 napi_disable(&adapter->napi); 561 goto err_out;
563 return -ENOMEM;
564 } 562 }
565 563
566 dev = &adapter->vdev->dev; 564 dev = &adapter->vdev->dev;
@@ -578,9 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
578 (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { 576 (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
579 netdev_err(netdev, "unable to map filter or buffer list " 577 netdev_err(netdev, "unable to map filter or buffer list "
580 "pages\n"); 578 "pages\n");
581 ibmveth_cleanup(adapter); 579 rc = -ENOMEM;
582 napi_disable(&adapter->napi); 580 goto err_out;
583 return -ENOMEM;
584 } 581 }
585 582
586 adapter->rx_queue.index = 0; 583 adapter->rx_queue.index = 0;
@@ -611,9 +608,8 @@ static int ibmveth_open(struct net_device *netdev)
611 adapter->filter_list_dma, 608 adapter->filter_list_dma,
612 rxq_desc.desc, 609 rxq_desc.desc,
613 mac_address); 610 mac_address);
614 ibmveth_cleanup(adapter); 611 rc = -ENONET;
615 napi_disable(&adapter->napi); 612 goto err_out;
616 return -ENONET;
617 } 613 }
618 614
619 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { 615 for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -622,9 +618,8 @@ static int ibmveth_open(struct net_device *netdev)
622 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) { 618 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
623 netdev_err(netdev, "unable to alloc pool\n"); 619 netdev_err(netdev, "unable to alloc pool\n");
624 adapter->rx_buff_pool[i].active = 0; 620 adapter->rx_buff_pool[i].active = 0;
625 ibmveth_cleanup(adapter); 621 rc = -ENOMEM;
626 napi_disable(&adapter->napi); 622 goto err_out;
627 return -ENOMEM ;
628 } 623 }
629 } 624 }
630 625
@@ -638,27 +633,23 @@ static int ibmveth_open(struct net_device *netdev)
638 rc = h_free_logical_lan(adapter->vdev->unit_address); 633 rc = h_free_logical_lan(adapter->vdev->unit_address);
639 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); 634 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
640 635
641 ibmveth_cleanup(adapter); 636 goto err_out;
642 napi_disable(&adapter->napi);
643 return rc;
644 } 637 }
645 638
646 adapter->bounce_buffer = 639 adapter->bounce_buffer =
647 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); 640 kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
648 if (!adapter->bounce_buffer) { 641 if (!adapter->bounce_buffer) {
649 netdev_err(netdev, "unable to allocate bounce buffer\n"); 642 netdev_err(netdev, "unable to allocate bounce buffer\n");
650 ibmveth_cleanup(adapter); 643 rc = -ENOMEM;
651 napi_disable(&adapter->napi); 644 goto err_out_free_irq;
652 return -ENOMEM;
653 } 645 }
654 adapter->bounce_buffer_dma = 646 adapter->bounce_buffer_dma =
655 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, 647 dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
656 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); 648 netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
657 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { 649 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
658 netdev_err(netdev, "unable to map bounce buffer\n"); 650 netdev_err(netdev, "unable to map bounce buffer\n");
659 ibmveth_cleanup(adapter); 651 rc = -ENOMEM;
660 napi_disable(&adapter->napi); 652 goto err_out_free_irq;
661 return -ENOMEM;
662 } 653 }
663 654
664 netdev_dbg(netdev, "initial replenish cycle\n"); 655 netdev_dbg(netdev, "initial replenish cycle\n");
@@ -669,6 +660,13 @@ static int ibmveth_open(struct net_device *netdev)
669 netdev_dbg(netdev, "open complete\n"); 660 netdev_dbg(netdev, "open complete\n");
670 661
671 return 0; 662 return 0;
663
664err_out_free_irq:
665 free_irq(netdev->irq, netdev);
666err_out:
667 ibmveth_cleanup(adapter);
668 napi_disable(&adapter->napi);
669 return rc;
672} 670}
673 671
674static int ibmveth_close(struct net_device *netdev) 672static int ibmveth_close(struct net_device *netdev)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 187622f1c816..bc183f5487cb 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -132,6 +132,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
132 case E1000_DEV_ID_82580_SERDES: 132 case E1000_DEV_ID_82580_SERDES:
133 case E1000_DEV_ID_82580_SGMII: 133 case E1000_DEV_ID_82580_SGMII:
134 case E1000_DEV_ID_82580_COPPER_DUAL: 134 case E1000_DEV_ID_82580_COPPER_DUAL:
135 case E1000_DEV_ID_DH89XXCC_SGMII:
136 case E1000_DEV_ID_DH89XXCC_SERDES:
135 mac->type = e1000_82580; 137 mac->type = e1000_82580;
136 break; 138 break;
137 case E1000_DEV_ID_I350_COPPER: 139 case E1000_DEV_ID_I350_COPPER:
@@ -282,10 +284,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
282 284
283 /* Verify phy id and set remaining function pointers */ 285 /* Verify phy id and set remaining function pointers */
284 switch (phy->id) { 286 switch (phy->id) {
287 case I347AT4_E_PHY_ID:
288 case M88E1112_E_PHY_ID:
285 case M88E1111_I_PHY_ID: 289 case M88E1111_I_PHY_ID:
286 phy->type = e1000_phy_m88; 290 phy->type = e1000_phy_m88;
287 phy->ops.get_phy_info = igb_get_phy_info_m88; 291 phy->ops.get_phy_info = igb_get_phy_info_m88;
288 phy->ops.get_cable_length = igb_get_cable_length_m88; 292
293 if (phy->id == I347AT4_E_PHY_ID ||
294 phy->id == M88E1112_E_PHY_ID)
295 phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
296 else
297 phy->ops.get_cable_length = igb_get_cable_length_m88;
298
289 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; 299 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
290 break; 300 break;
291 case IGP03E1000_E_PHY_ID: 301 case IGP03E1000_E_PHY_ID:
@@ -1058,7 +1068,11 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1058 } 1068 }
1059 switch (hw->phy.type) { 1069 switch (hw->phy.type) {
1060 case e1000_phy_m88: 1070 case e1000_phy_m88:
1061 ret_val = igb_copper_link_setup_m88(hw); 1071 if (hw->phy.id == I347AT4_E_PHY_ID ||
1072 hw->phy.id == M88E1112_E_PHY_ID)
1073 ret_val = igb_copper_link_setup_m88_gen2(hw);
1074 else
1075 ret_val = igb_copper_link_setup_m88(hw);
1062 break; 1076 break;
1063 case e1000_phy_igp_3: 1077 case e1000_phy_igp_3:
1064 ret_val = igb_copper_link_setup_igp(hw); 1078 ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index bbd2ec308eb0..62222796a8b3 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -634,6 +634,8 @@
634 * E = External 634 * E = External
635 */ 635 */
636#define M88E1111_I_PHY_ID 0x01410CC0 636#define M88E1111_I_PHY_ID 0x01410CC0
637#define M88E1112_E_PHY_ID 0x01410C90
638#define I347AT4_E_PHY_ID 0x01410DC0
637#define IGP03E1000_E_PHY_ID 0x02A80390 639#define IGP03E1000_E_PHY_ID 0x02A80390
638#define I82580_I_PHY_ID 0x015403A0 640#define I82580_I_PHY_ID 0x015403A0
639#define I350_I_PHY_ID 0x015403B0 641#define I350_I_PHY_ID 0x015403B0
@@ -702,6 +704,35 @@
702#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 704#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
703#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ 705#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
704 706
707/* Intel i347-AT4 Registers */
708
709#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */
710#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */
711#define I347AT4_PAGE_SELECT 0x16
712
713/* i347-AT4 Extended PHY Specific Control Register */
714
715/*
716 * Number of times we will attempt to autonegotiate before downshifting if we
717 * are the master
718 */
719#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
720#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000
721#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000
722#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000
723#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000
724#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000
725#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000
726#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000
727#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000
728#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000
729
730/* i347-AT4 PHY Cable Diagnostics Control */
731#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
732
733/* Marvell 1112 only registers */
734#define M88E1112_VCT_DSP_DISTANCE 0x001A
735
705/* M88EC018 Rev 2 specific DownShift settings */ 736/* M88EC018 Rev 2 specific DownShift settings */
706#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 737#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
707#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 738#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index cb8db78b1a05..c0b017f8d782 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,8 @@ struct e1000_hw;
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_DH89XXCC_SGMII 0x0436
58#define E1000_DEV_ID_DH89XXCC_SERDES 0x0438
57#define E1000_DEV_ID_I350_COPPER 0x1521 59#define E1000_DEV_ID_I350_COPPER 0x1521
58#define E1000_DEV_ID_I350_FIBER 0x1522 60#define E1000_DEV_ID_I350_FIBER 0x1522
59#define E1000_DEV_ID_I350_SERDES 0x1523 61#define E1000_DEV_ID_I350_SERDES 0x1523
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index cf1f32300923..ddd036a78999 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -570,6 +570,89 @@ out:
570} 570}
571 571
572/** 572/**
573 * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
574 * @hw: pointer to the HW structure
575 *
576 * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
577 * Also enables and sets the downshift parameters.
578 **/
579s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
580{
581 struct e1000_phy_info *phy = &hw->phy;
582 s32 ret_val;
583 u16 phy_data;
584
585 if (phy->reset_disable) {
586 ret_val = 0;
587 goto out;
588 }
589
590 /* Enable CRS on Tx. This must be set for half-duplex operation. */
591 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
592 if (ret_val)
593 goto out;
594
595 /*
596 * Options:
597 * MDI/MDI-X = 0 (default)
598 * 0 - Auto for all speeds
599 * 1 - MDI mode
600 * 2 - MDI-X mode
601 * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
602 */
603 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
604
605 switch (phy->mdix) {
606 case 1:
607 phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
608 break;
609 case 2:
610 phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
611 break;
612 case 3:
613 /* M88E1112 does not support this mode) */
614 if (phy->id != M88E1112_E_PHY_ID) {
615 phy_data |= M88E1000_PSCR_AUTO_X_1000T;
616 break;
617 }
618 case 0:
619 default:
620 phy_data |= M88E1000_PSCR_AUTO_X_MODE;
621 break;
622 }
623
624 /*
625 * Options:
626 * disable_polarity_correction = 0 (default)
627 * Automatic Correction for Reversed Cable Polarity
628 * 0 - Disabled
629 * 1 - Enabled
630 */
631 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
632 if (phy->disable_polarity_correction == 1)
633 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
634
635 /* Enable downshift and setting it to X6 */
636 phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
637 phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
638 phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
639
640 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
641 if (ret_val)
642 goto out;
643
644 /* Commit the changes. */
645 ret_val = igb_phy_sw_reset(hw);
646 if (ret_val) {
647 hw_dbg("Error committing the PHY changes\n");
648 goto out;
649 }
650
651out:
652 return ret_val;
653}
654
655/**
573 * igb_copper_link_setup_igp - Setup igp PHY's for copper link 656 * igb_copper_link_setup_igp - Setup igp PHY's for copper link
574 * @hw: pointer to the HW structure 657 * @hw: pointer to the HW structure
575 * 658 *
@@ -1124,18 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1124 goto out; 1207 goto out;
1125 1208
1126 if (!link) { 1209 if (!link) {
1127 /* 1210 if (hw->phy.type != e1000_phy_m88 ||
1128 * We didn't get link. 1211 hw->phy.id == I347AT4_E_PHY_ID ||
1129 * Reset the DSP and cross our fingers. 1212 hw->phy.id == M88E1112_E_PHY_ID) {
1130 */ 1213 hw_dbg("Link taking longer than expected.\n");
1131 ret_val = phy->ops.write_reg(hw, 1214 } else {
1132 M88E1000_PHY_PAGE_SELECT, 1215
1133 0x001d); 1216 /*
1134 if (ret_val) 1217 * We didn't get link.
1135 goto out; 1218 * Reset the DSP and cross our fingers.
1136 ret_val = igb_phy_reset_dsp(hw); 1219 */
1137 if (ret_val) 1220 ret_val = phy->ops.write_reg(hw,
1138 goto out; 1221 M88E1000_PHY_PAGE_SELECT,
1222 0x001d);
1223 if (ret_val)
1224 goto out;
1225 ret_val = igb_phy_reset_dsp(hw);
1226 if (ret_val)
1227 goto out;
1228 }
1139 } 1229 }
1140 1230
1141 /* Try once more */ 1231 /* Try once more */
@@ -1145,6 +1235,11 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1145 goto out; 1235 goto out;
1146 } 1236 }
1147 1237
1238 if (hw->phy.type != e1000_phy_m88 ||
1239 hw->phy.id == I347AT4_E_PHY_ID ||
1240 hw->phy.id == M88E1112_E_PHY_ID)
1241 goto out;
1242
1148 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1243 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
1149 if (ret_val) 1244 if (ret_val)
1150 goto out; 1245 goto out;
@@ -1557,6 +1652,93 @@ out:
1557 return ret_val; 1652 return ret_val;
1558} 1653}
1559 1654
1655s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1656{
1657 struct e1000_phy_info *phy = &hw->phy;
1658 s32 ret_val;
1659 u16 phy_data, phy_data2, index, default_page, is_cm;
1660
1661 switch (hw->phy.id) {
1662 case I347AT4_E_PHY_ID:
1663 /* Remember the original page select and set it to 7 */
1664 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
1665 &default_page);
1666 if (ret_val)
1667 goto out;
1668
1669 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
1670 if (ret_val)
1671 goto out;
1672
1673 /* Get cable length from PHY Cable Diagnostics Control Reg */
1674 ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
1675 &phy_data);
1676 if (ret_val)
1677 goto out;
1678
1679 /* Check if the unit of cable length is meters or cm */
1680 ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
1681 if (ret_val)
1682 goto out;
1683
1684 is_cm = !(phy_data & I347AT4_PCDC_CABLE_LENGTH_UNIT);
1685
1686 /* Populate the phy structure with cable length in meters */
1687 phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
1688 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1689 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1690
1691 /* Reset the page selec to its original value */
1692 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
1693 default_page);
1694 if (ret_val)
1695 goto out;
1696 break;
1697 case M88E1112_E_PHY_ID:
1698 /* Remember the original page select and set it to 5 */
1699 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
1700 &default_page);
1701 if (ret_val)
1702 goto out;
1703
1704 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
1705 if (ret_val)
1706 goto out;
1707
1708 ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
1709 &phy_data);
1710 if (ret_val)
1711 goto out;
1712
1713 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1714 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1715 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1716 ret_val = -E1000_ERR_PHY;
1717 goto out;
1718 }
1719
1720 phy->min_cable_length = e1000_m88_cable_length_table[index];
1721 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1722
1723 phy->cable_length = (phy->min_cable_length +
1724 phy->max_cable_length) / 2;
1725
1726 /* Reset the page select to its original value */
1727 ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
1728 default_page);
1729 if (ret_val)
1730 goto out;
1731
1732 break;
1733 default:
1734 ret_val = -E1000_ERR_PHY;
1735 goto out;
1736 }
1737
1738out:
1739 return ret_val;
1740}
1741
1560/** 1742/**
1561 * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY 1743 * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
1562 * @hw: pointer to the HW structure 1744 * @hw: pointer to the HW structure
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 565a6dbb3714..2cc117705a31 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -45,9 +45,11 @@ s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw); 45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_setup_igp(struct e1000_hw *hw); 46s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
47s32 igb_copper_link_setup_m88(struct e1000_hw *hw); 47s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
48s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
48s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); 49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
49s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw); 50s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
50s32 igb_get_cable_length_m88(struct e1000_hw *hw); 51s32 igb_get_cable_length_m88(struct e1000_hw *hw);
52s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
51s32 igb_get_cable_length_igp_2(struct e1000_hw *hw); 53s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
52s32 igb_get_phy_id(struct e1000_hw *hw); 54s32 igb_get_phy_id(struct e1000_hw *hw);
53s32 igb_get_phy_info_igp(struct e1000_hw *hw); 55s32 igb_get_phy_info_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 44e0ff1494e0..edab9c442399 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -159,6 +159,7 @@ struct igb_tx_queue_stats {
159 u64 packets; 159 u64 packets;
160 u64 bytes; 160 u64 bytes;
161 u64 restart_queue; 161 u64 restart_queue;
162 u64 restart_queue2;
162}; 163};
163 164
164struct igb_rx_queue_stats { 165struct igb_rx_queue_stats {
@@ -210,11 +211,14 @@ struct igb_ring {
210 /* TX */ 211 /* TX */
211 struct { 212 struct {
212 struct igb_tx_queue_stats tx_stats; 213 struct igb_tx_queue_stats tx_stats;
214 struct u64_stats_sync tx_syncp;
215 struct u64_stats_sync tx_syncp2;
213 bool detect_tx_hung; 216 bool detect_tx_hung;
214 }; 217 };
215 /* RX */ 218 /* RX */
216 struct { 219 struct {
217 struct igb_rx_queue_stats rx_stats; 220 struct igb_rx_queue_stats rx_stats;
221 struct u64_stats_sync rx_syncp;
218 u32 rx_buffer_len; 222 u32 rx_buffer_len;
219 }; 223 };
220 }; 224 };
@@ -288,6 +292,9 @@ struct igb_adapter {
288 struct timecompare compare; 292 struct timecompare compare;
289 struct hwtstamp_config hwtstamp_config; 293 struct hwtstamp_config hwtstamp_config;
290 294
295 spinlock_t stats64_lock;
296 struct rtnl_link_stats64 stats64;
297
291 /* structs defined in e1000_hw.h */ 298 /* structs defined in e1000_hw.h */
292 struct e1000_hw hw; 299 struct e1000_hw hw;
293 struct e1000_hw_stats stats; 300 struct e1000_hw_stats stats;
@@ -357,7 +364,7 @@ extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
357extern void igb_unmap_and_free_tx_resource(struct igb_ring *, 364extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
358 struct igb_buffer *); 365 struct igb_buffer *);
359extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 366extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
360extern void igb_update_stats(struct igb_adapter *); 367extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
361extern bool igb_has_link(struct igb_adapter *adapter); 368extern bool igb_has_link(struct igb_adapter *adapter);
362extern void igb_set_ethtool_ops(struct net_device *); 369extern void igb_set_ethtool_ops(struct net_device *);
363extern void igb_power_up_link(struct igb_adapter *); 370extern void igb_power_up_link(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 26bf6a13d1c1..a70e16bcfa7e 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -90,8 +90,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
90 90
91#define IGB_NETDEV_STAT(_net_stat) { \ 91#define IGB_NETDEV_STAT(_net_stat) { \
92 .stat_string = __stringify(_net_stat), \ 92 .stat_string = __stringify(_net_stat), \
93 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ 93 .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
94 .stat_offset = offsetof(struct net_device_stats, _net_stat) \ 94 .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
95} 95}
96static const struct igb_stats igb_gstrings_net_stats[] = { 96static const struct igb_stats igb_gstrings_net_stats[] = {
97 IGB_NETDEV_STAT(rx_errors), 97 IGB_NETDEV_STAT(rx_errors),
@@ -111,8 +111,9 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
111 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats)) 111 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
112#define IGB_RX_QUEUE_STATS_LEN \ 112#define IGB_RX_QUEUE_STATS_LEN \
113 (sizeof(struct igb_rx_queue_stats) / sizeof(u64)) 113 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
114#define IGB_TX_QUEUE_STATS_LEN \ 114
115 (sizeof(struct igb_tx_queue_stats) / sizeof(u64)) 115#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
116
116#define IGB_QUEUE_STATS_LEN \ 117#define IGB_QUEUE_STATS_LEN \
117 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \ 118 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
118 IGB_RX_QUEUE_STATS_LEN) + \ 119 IGB_RX_QUEUE_STATS_LEN) + \
@@ -2070,12 +2071,14 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2070 struct ethtool_stats *stats, u64 *data) 2071 struct ethtool_stats *stats, u64 *data)
2071{ 2072{
2072 struct igb_adapter *adapter = netdev_priv(netdev); 2073 struct igb_adapter *adapter = netdev_priv(netdev);
2073 struct net_device_stats *net_stats = &netdev->stats; 2074 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
2074 u64 *queue_stat; 2075 unsigned int start;
2075 int i, j, k; 2076 struct igb_ring *ring;
2077 int i, j;
2076 char *p; 2078 char *p;
2077 2079
2078 igb_update_stats(adapter); 2080 spin_lock(&adapter->stats64_lock);
2081 igb_update_stats(adapter, net_stats);
2079 2082
2080 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 2083 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
2081 p = (char *)adapter + igb_gstrings_stats[i].stat_offset; 2084 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
@@ -2088,15 +2091,36 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2088 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 2091 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2089 } 2092 }
2090 for (j = 0; j < adapter->num_tx_queues; j++) { 2093 for (j = 0; j < adapter->num_tx_queues; j++) {
2091 queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats; 2094 u64 restart2;
2092 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++) 2095
2093 data[i] = queue_stat[k]; 2096 ring = adapter->tx_ring[j];
2097 do {
2098 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
2099 data[i] = ring->tx_stats.packets;
2100 data[i+1] = ring->tx_stats.bytes;
2101 data[i+2] = ring->tx_stats.restart_queue;
2102 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
2103 do {
2104 start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
2105 restart2 = ring->tx_stats.restart_queue2;
2106 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
2107 data[i+2] += restart2;
2108
2109 i += IGB_TX_QUEUE_STATS_LEN;
2094 } 2110 }
2095 for (j = 0; j < adapter->num_rx_queues; j++) { 2111 for (j = 0; j < adapter->num_rx_queues; j++) {
2096 queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats; 2112 ring = adapter->rx_ring[j];
2097 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++) 2113 do {
2098 data[i] = queue_stat[k]; 2114 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
2115 data[i] = ring->rx_stats.packets;
2116 data[i+1] = ring->rx_stats.bytes;
2117 data[i+2] = ring->rx_stats.drops;
2118 data[i+3] = ring->rx_stats.csum_err;
2119 data[i+4] = ring->rx_stats.alloc_failed;
2120 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
2121 i += IGB_RX_QUEUE_STATS_LEN;
2099 } 2122 }
2123 spin_unlock(&adapter->stats64_lock);
2100} 2124}
2101 2125
2102static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2126static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c4d861b557ca..75155a27fdde 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -71,6 +71,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, 72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -94,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
94static void igb_free_all_tx_resources(struct igb_adapter *); 96static void igb_free_all_tx_resources(struct igb_adapter *);
95static void igb_free_all_rx_resources(struct igb_adapter *); 97static void igb_free_all_rx_resources(struct igb_adapter *);
96static void igb_setup_mrqc(struct igb_adapter *); 98static void igb_setup_mrqc(struct igb_adapter *);
97void igb_update_stats(struct igb_adapter *);
98static int igb_probe(struct pci_dev *, const struct pci_device_id *); 99static int igb_probe(struct pci_dev *, const struct pci_device_id *);
99static void __devexit igb_remove(struct pci_dev *pdev); 100static void __devexit igb_remove(struct pci_dev *pdev);
100static int igb_sw_init(struct igb_adapter *); 101static int igb_sw_init(struct igb_adapter *);
@@ -111,7 +112,8 @@ static void igb_update_phy_info(unsigned long);
111static void igb_watchdog(unsigned long); 112static void igb_watchdog(unsigned long);
112static void igb_watchdog_task(struct work_struct *); 113static void igb_watchdog_task(struct work_struct *);
113static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *); 114static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
114static struct net_device_stats *igb_get_stats(struct net_device *); 115static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
116 struct rtnl_link_stats64 *stats);
115static int igb_change_mtu(struct net_device *, int); 117static int igb_change_mtu(struct net_device *, int);
116static int igb_set_mac(struct net_device *, void *); 118static int igb_set_mac(struct net_device *, void *);
117static void igb_set_uta(struct igb_adapter *adapter); 119static void igb_set_uta(struct igb_adapter *adapter);
@@ -986,7 +988,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
986 * Attempt to configure interrupts using the best available 988 * Attempt to configure interrupts using the best available
987 * capabilities of the hardware and kernel. 989 * capabilities of the hardware and kernel.
988 **/ 990 **/
989static void igb_set_interrupt_capability(struct igb_adapter *adapter) 991static int igb_set_interrupt_capability(struct igb_adapter *adapter)
990{ 992{
991 int err; 993 int err;
992 int numvecs, i; 994 int numvecs, i;
@@ -1052,8 +1054,10 @@ msi_only:
1052 if (!pci_enable_msi(adapter->pdev)) 1054 if (!pci_enable_msi(adapter->pdev))
1053 adapter->flags |= IGB_FLAG_HAS_MSI; 1055 adapter->flags |= IGB_FLAG_HAS_MSI;
1054out: 1056out:
1055 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 1057 /* Notify the stack of the (possibly) reduced queue counts. */
1056 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; 1058 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
1059 return netif_set_real_num_rx_queues(adapter->netdev,
1060 adapter->num_rx_queues);
1057} 1061}
1058 1062
1059/** 1063/**
@@ -1152,7 +1156,9 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1152 struct pci_dev *pdev = adapter->pdev; 1156 struct pci_dev *pdev = adapter->pdev;
1153 int err; 1157 int err;
1154 1158
1155 igb_set_interrupt_capability(adapter); 1159 err = igb_set_interrupt_capability(adapter);
1160 if (err)
1161 return err;
1156 1162
1157 err = igb_alloc_q_vectors(adapter); 1163 err = igb_alloc_q_vectors(adapter);
1158 if (err) { 1164 if (err) {
@@ -1530,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter)
1530 netif_carrier_off(netdev); 1536 netif_carrier_off(netdev);
1531 1537
1532 /* record the stats before reset*/ 1538 /* record the stats before reset*/
1533 igb_update_stats(adapter); 1539 spin_lock(&adapter->stats64_lock);
1540 igb_update_stats(adapter, &adapter->stats64);
1541 spin_unlock(&adapter->stats64_lock);
1534 1542
1535 adapter->link_speed = 0; 1543 adapter->link_speed = 0;
1536 adapter->link_duplex = 0; 1544 adapter->link_duplex = 0;
@@ -1683,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = {
1683 .ndo_open = igb_open, 1691 .ndo_open = igb_open,
1684 .ndo_stop = igb_close, 1692 .ndo_stop = igb_close,
1685 .ndo_start_xmit = igb_xmit_frame_adv, 1693 .ndo_start_xmit = igb_xmit_frame_adv,
1686 .ndo_get_stats = igb_get_stats, 1694 .ndo_get_stats64 = igb_get_stats64,
1687 .ndo_set_rx_mode = igb_set_rx_mode, 1695 .ndo_set_rx_mode = igb_set_rx_mode,
1688 .ndo_set_multicast_list = igb_set_rx_mode, 1696 .ndo_set_multicast_list = igb_set_rx_mode,
1689 .ndo_set_mac_address = igb_set_mac, 1697 .ndo_set_mac_address = igb_set_mac,
@@ -1856,8 +1864,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1856 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 1864 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
1857 netdev->vlan_features |= NETIF_F_SG; 1865 netdev->vlan_features |= NETIF_F_SG;
1858 1866
1859 if (pci_using_dac) 1867 if (pci_using_dac) {
1860 netdev->features |= NETIF_F_HIGHDMA; 1868 netdev->features |= NETIF_F_HIGHDMA;
1869 netdev->vlan_features |= NETIF_F_HIGHDMA;
1870 }
1861 1871
1862 if (hw->mac.type >= e1000_82576) 1872 if (hw->mac.type >= e1000_82576)
1863 netdev->features |= NETIF_F_SCTP_CSUM; 1873 netdev->features |= NETIF_F_SCTP_CSUM;
@@ -2268,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2268 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2278 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2269 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 2279 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2270 2280
2281 spin_lock_init(&adapter->stats64_lock);
2271#ifdef CONFIG_PCI_IOV 2282#ifdef CONFIG_PCI_IOV
2272 if (hw->mac.type == e1000_82576) 2283 if (hw->mac.type == e1000_82576)
2273 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; 2284 adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
@@ -3475,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work)
3475 } 3486 }
3476 } 3487 }
3477 3488
3478 igb_update_stats(adapter); 3489 spin_lock(&adapter->stats64_lock);
3490 igb_update_stats(adapter, &adapter->stats64);
3491 spin_unlock(&adapter->stats64_lock);
3479 3492
3480 for (i = 0; i < adapter->num_tx_queues; i++) { 3493 for (i = 0; i < adapter->num_tx_queues; i++) {
3481 struct igb_ring *tx_ring = adapter->tx_ring[i]; 3494 struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3542,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3542 int new_val = q_vector->itr_val; 3555 int new_val = q_vector->itr_val;
3543 int avg_wire_size = 0; 3556 int avg_wire_size = 0;
3544 struct igb_adapter *adapter = q_vector->adapter; 3557 struct igb_adapter *adapter = q_vector->adapter;
3558 struct igb_ring *ring;
3559 unsigned int packets;
3545 3560
3546 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3561 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3547 * ints/sec - ITR timer value of 120 ticks. 3562 * ints/sec - ITR timer value of 120 ticks.
@@ -3551,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
3551 goto set_itr_val; 3566 goto set_itr_val;
3552 } 3567 }
3553 3568
3554 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) { 3569 ring = q_vector->rx_ring;
3555 struct igb_ring *ring = q_vector->rx_ring; 3570 if (ring) {
3556 avg_wire_size = ring->total_bytes / ring->total_packets; 3571 packets = ACCESS_ONCE(ring->total_packets);
3572
3573 if (packets)
3574 avg_wire_size = ring->total_bytes / packets;
3557 } 3575 }
3558 3576
3559 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) { 3577 ring = q_vector->tx_ring;
3560 struct igb_ring *ring = q_vector->tx_ring; 3578 if (ring) {
3561 avg_wire_size = max_t(u32, avg_wire_size, 3579 packets = ACCESS_ONCE(ring->total_packets);
3562 (ring->total_bytes / 3580
3563 ring->total_packets)); 3581 if (packets)
3582 avg_wire_size = max_t(u32, avg_wire_size,
3583 ring->total_bytes / packets);
3564 } 3584 }
3565 3585
3566 /* if avg_wire_size isn't set no work was done */ 3586 /* if avg_wire_size isn't set no work was done */
@@ -4069,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
4069 4089
4070 /* A reprieve! */ 4090 /* A reprieve! */
4071 netif_wake_subqueue(netdev, tx_ring->queue_index); 4091 netif_wake_subqueue(netdev, tx_ring->queue_index);
4072 tx_ring->tx_stats.restart_queue++; 4092
4093 u64_stats_update_begin(&tx_ring->tx_syncp2);
4094 tx_ring->tx_stats.restart_queue2++;
4095 u64_stats_update_end(&tx_ring->tx_syncp2);
4096
4073 return 0; 4097 return 0;
4074} 4098}
4075 4099
@@ -4104,7 +4128,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
4104 tx_flags |= IGB_TX_FLAGS_TSTAMP; 4128 tx_flags |= IGB_TX_FLAGS_TSTAMP;
4105 } 4129 }
4106 4130
4107 if (vlan_tx_tag_present(skb) && adapter->vlgrp) { 4131 if (vlan_tx_tag_present(skb)) {
4108 tx_flags |= IGB_TX_FLAGS_VLAN; 4132 tx_flags |= IGB_TX_FLAGS_VLAN;
4109 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 4133 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4110 } 4134 }
@@ -4206,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work)
4206} 4230}
4207 4231
4208/** 4232/**
4209 * igb_get_stats - Get System Network Statistics 4233 * igb_get_stats64 - Get System Network Statistics
4210 * @netdev: network interface device structure 4234 * @netdev: network interface device structure
4235 * @stats: rtnl_link_stats64 pointer
4211 * 4236 *
4212 * Returns the address of the device statistics structure.
4213 * The statistics are actually updated from the timer callback.
4214 **/ 4237 **/
4215static struct net_device_stats *igb_get_stats(struct net_device *netdev) 4238static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4239 struct rtnl_link_stats64 *stats)
4216{ 4240{
4217 /* only return the current stats */ 4241 struct igb_adapter *adapter = netdev_priv(netdev);
4218 return &netdev->stats; 4242
4243 spin_lock(&adapter->stats64_lock);
4244 igb_update_stats(adapter, &adapter->stats64);
4245 memcpy(stats, &adapter->stats64, sizeof(*stats));
4246 spin_unlock(&adapter->stats64_lock);
4247
4248 return stats;
4219} 4249}
4220 4250
4221/** 4251/**
@@ -4297,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4297 * @adapter: board private structure 4327 * @adapter: board private structure
4298 **/ 4328 **/
4299 4329
4300void igb_update_stats(struct igb_adapter *adapter) 4330void igb_update_stats(struct igb_adapter *adapter,
4331 struct rtnl_link_stats64 *net_stats)
4301{ 4332{
4302 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
4303 struct e1000_hw *hw = &adapter->hw; 4333 struct e1000_hw *hw = &adapter->hw;
4304 struct pci_dev *pdev = adapter->pdev; 4334 struct pci_dev *pdev = adapter->pdev;
4305 u32 reg, mpc; 4335 u32 reg, mpc;
4306 u16 phy_tmp; 4336 u16 phy_tmp;
4307 int i; 4337 int i;
4308 u64 bytes, packets; 4338 u64 bytes, packets;
4339 unsigned int start;
4340 u64 _bytes, _packets;
4309 4341
4310#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 4342#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4311 4343
@@ -4323,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter)
4323 for (i = 0; i < adapter->num_rx_queues; i++) { 4355 for (i = 0; i < adapter->num_rx_queues; i++) {
4324 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; 4356 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
4325 struct igb_ring *ring = adapter->rx_ring[i]; 4357 struct igb_ring *ring = adapter->rx_ring[i];
4358
4326 ring->rx_stats.drops += rqdpc_tmp; 4359 ring->rx_stats.drops += rqdpc_tmp;
4327 net_stats->rx_fifo_errors += rqdpc_tmp; 4360 net_stats->rx_fifo_errors += rqdpc_tmp;
4328 bytes += ring->rx_stats.bytes; 4361
4329 packets += ring->rx_stats.packets; 4362 do {
4363 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4364 _bytes = ring->rx_stats.bytes;
4365 _packets = ring->rx_stats.packets;
4366 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4367 bytes += _bytes;
4368 packets += _packets;
4330 } 4369 }
4331 4370
4332 net_stats->rx_bytes = bytes; 4371 net_stats->rx_bytes = bytes;
@@ -4336,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter)
4336 packets = 0; 4375 packets = 0;
4337 for (i = 0; i < adapter->num_tx_queues; i++) { 4376 for (i = 0; i < adapter->num_tx_queues; i++) {
4338 struct igb_ring *ring = adapter->tx_ring[i]; 4377 struct igb_ring *ring = adapter->tx_ring[i];
4339 bytes += ring->tx_stats.bytes; 4378 do {
4340 packets += ring->tx_stats.packets; 4379 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4380 _bytes = ring->tx_stats.bytes;
4381 _packets = ring->tx_stats.packets;
4382 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4383 bytes += _bytes;
4384 packets += _packets;
4341 } 4385 }
4342 net_stats->tx_bytes = bytes; 4386 net_stats->tx_bytes = bytes;
4343 net_stats->tx_packets = packets; 4387 net_stats->tx_packets = packets;
@@ -4659,12 +4703,13 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4659 u32 vmolr = rd32(E1000_VMOLR(vf)); 4703 u32 vmolr = rd32(E1000_VMOLR(vf));
4660 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4704 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4661 4705
4662 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC | 4706 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
4663 IGB_VF_FLAG_MULTI_PROMISC); 4707 IGB_VF_FLAG_MULTI_PROMISC);
4664 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 4708 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4665 4709
4666 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { 4710 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4667 vmolr |= E1000_VMOLR_MPME; 4711 vmolr |= E1000_VMOLR_MPME;
4712 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
4668 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; 4713 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4669 } else { 4714 } else {
4670 /* 4715 /*
@@ -5388,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5388 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 5433 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
5389 !(test_bit(__IGB_DOWN, &adapter->state))) { 5434 !(test_bit(__IGB_DOWN, &adapter->state))) {
5390 netif_wake_subqueue(netdev, tx_ring->queue_index); 5435 netif_wake_subqueue(netdev, tx_ring->queue_index);
5436
5437 u64_stats_update_begin(&tx_ring->tx_syncp);
5391 tx_ring->tx_stats.restart_queue++; 5438 tx_ring->tx_stats.restart_queue++;
5439 u64_stats_update_end(&tx_ring->tx_syncp);
5392 } 5440 }
5393 } 5441 }
5394 5442
@@ -5428,9 +5476,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5428 } 5476 }
5429 tx_ring->total_bytes += total_bytes; 5477 tx_ring->total_bytes += total_bytes;
5430 tx_ring->total_packets += total_packets; 5478 tx_ring->total_packets += total_packets;
5479 u64_stats_update_begin(&tx_ring->tx_syncp);
5431 tx_ring->tx_stats.bytes += total_bytes; 5480 tx_ring->tx_stats.bytes += total_bytes;
5432 tx_ring->tx_stats.packets += total_packets; 5481 tx_ring->tx_stats.packets += total_packets;
5433 return (count < tx_ring->count); 5482 u64_stats_update_end(&tx_ring->tx_syncp);
5483 return count < tx_ring->count;
5434} 5484}
5435 5485
5436/** 5486/**
@@ -5471,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5471 * packets, (aka let the stack check the crc32c) 5521 * packets, (aka let the stack check the crc32c)
5472 */ 5522 */
5473 if ((skb->len == 60) && 5523 if ((skb->len == 60) &&
5474 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) 5524 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
5525 u64_stats_update_begin(&ring->rx_syncp);
5475 ring->rx_stats.csum_err++; 5526 ring->rx_stats.csum_err++;
5476 5527 u64_stats_update_end(&ring->rx_syncp);
5528 }
5477 /* let the stack verify checksum errors */ 5529 /* let the stack verify checksum errors */
5478 return; 5530 return;
5479 } 5531 }
@@ -5660,8 +5712,10 @@ next_desc:
5660 5712
5661 rx_ring->total_packets += total_packets; 5713 rx_ring->total_packets += total_packets;
5662 rx_ring->total_bytes += total_bytes; 5714 rx_ring->total_bytes += total_bytes;
5715 u64_stats_update_begin(&rx_ring->rx_syncp);
5663 rx_ring->rx_stats.packets += total_packets; 5716 rx_ring->rx_stats.packets += total_packets;
5664 rx_ring->rx_stats.bytes += total_bytes; 5717 rx_ring->rx_stats.bytes += total_bytes;
5718 u64_stats_update_end(&rx_ring->rx_syncp);
5665 return cleaned; 5719 return cleaned;
5666} 5720}
5667 5721
@@ -5689,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5689 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) { 5743 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
5690 if (!buffer_info->page) { 5744 if (!buffer_info->page) {
5691 buffer_info->page = netdev_alloc_page(netdev); 5745 buffer_info->page = netdev_alloc_page(netdev);
5692 if (!buffer_info->page) { 5746 if (unlikely(!buffer_info->page)) {
5747 u64_stats_update_begin(&rx_ring->rx_syncp);
5693 rx_ring->rx_stats.alloc_failed++; 5748 rx_ring->rx_stats.alloc_failed++;
5749 u64_stats_update_end(&rx_ring->rx_syncp);
5694 goto no_buffers; 5750 goto no_buffers;
5695 } 5751 }
5696 buffer_info->page_offset = 0; 5752 buffer_info->page_offset = 0;
@@ -5705,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5705 if (dma_mapping_error(rx_ring->dev, 5761 if (dma_mapping_error(rx_ring->dev,
5706 buffer_info->page_dma)) { 5762 buffer_info->page_dma)) {
5707 buffer_info->page_dma = 0; 5763 buffer_info->page_dma = 0;
5764 u64_stats_update_begin(&rx_ring->rx_syncp);
5708 rx_ring->rx_stats.alloc_failed++; 5765 rx_ring->rx_stats.alloc_failed++;
5766 u64_stats_update_end(&rx_ring->rx_syncp);
5709 goto no_buffers; 5767 goto no_buffers;
5710 } 5768 }
5711 } 5769 }
@@ -5713,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5713 skb = buffer_info->skb; 5771 skb = buffer_info->skb;
5714 if (!skb) { 5772 if (!skb) {
5715 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 5773 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
5716 if (!skb) { 5774 if (unlikely(!skb)) {
5775 u64_stats_update_begin(&rx_ring->rx_syncp);
5717 rx_ring->rx_stats.alloc_failed++; 5776 rx_ring->rx_stats.alloc_failed++;
5777 u64_stats_update_end(&rx_ring->rx_syncp);
5718 goto no_buffers; 5778 goto no_buffers;
5719 } 5779 }
5720 5780
@@ -5728,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5728 if (dma_mapping_error(rx_ring->dev, 5788 if (dma_mapping_error(rx_ring->dev,
5729 buffer_info->dma)) { 5789 buffer_info->dma)) {
5730 buffer_info->dma = 0; 5790 buffer_info->dma = 0;
5791 u64_stats_update_begin(&rx_ring->rx_syncp);
5731 rx_ring->rx_stats.alloc_failed++; 5792 rx_ring->rx_stats.alloc_failed++;
5793 u64_stats_update_end(&rx_ring->rx_syncp);
5732 goto no_buffers; 5794 goto no_buffers;
5733 } 5795 }
5734 } 5796 }
@@ -6091,7 +6153,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
6091 6153
6092 if (adapter->vlgrp) { 6154 if (adapter->vlgrp) {
6093 u16 vid; 6155 u16 vid;
6094 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 6156 for (vid = 0; vid < VLAN_N_VID; vid++) {
6095 if (!vlan_group_get_device(adapter->vlgrp, vid)) 6157 if (!vlan_group_get_device(adapter->vlgrp, vid))
6096 continue; 6158 continue;
6097 igb_vlan_rx_add_vid(adapter->netdev, vid); 6159 igb_vlan_rx_add_vid(adapter->netdev, vid);
@@ -6106,6 +6168,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
6106 6168
6107 mac->autoneg = 0; 6169 mac->autoneg = 0;
6108 6170
6171 /* Fiber NIC's only allow 1000 Gbps Full duplex */
6172 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
6173 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
6174 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6175 return -EINVAL;
6176 }
6177
6109 switch (spddplx) { 6178 switch (spddplx) {
6110 case SPEED_10 + DUPLEX_HALF: 6179 case SPEED_10 + DUPLEX_HALF:
6111 mac->forced_speed_duplex = ADVERTISE_10_HALF; 6180 mac->forced_speed_duplex = ADVERTISE_10_HALF;
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 103b3aa1afc2..33add708bcbe 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -153,7 +153,7 @@ static int igbvf_set_rx_csum(struct net_device *netdev, u32 data)
153 153
154static u32 igbvf_get_tx_csum(struct net_device *netdev) 154static u32 igbvf_get_tx_csum(struct net_device *netdev)
155{ 155{
156 return ((netdev->features & NETIF_F_IP_CSUM) != 0); 156 return (netdev->features & NETIF_F_IP_CSUM) != 0;
157} 157}
158 158
159static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) 159static int igbvf_set_tx_csum(struct net_device *netdev, u32 data)
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index c7fab80d2490..ebfaa68ee630 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -41,14 +41,12 @@
41#include <linux/mii.h> 41#include <linux/mii.h>
42#include <linux/ethtool.h> 42#include <linux/ethtool.h>
43#include <linux/if_vlan.h> 43#include <linux/if_vlan.h>
44#include <linux/pm_qos_params.h>
45 44
46#include "igbvf.h" 45#include "igbvf.h"
47 46
48#define DRV_VERSION "1.0.0-k0" 47#define DRV_VERSION "1.0.0-k0"
49char igbvf_driver_name[] = "igbvf"; 48char igbvf_driver_name[] = "igbvf";
50const char igbvf_driver_version[] = DRV_VERSION; 49const char igbvf_driver_version[] = DRV_VERSION;
51static struct pm_qos_request_list igbvf_driver_pm_qos_req;
52static const char igbvf_driver_string[] = 50static const char igbvf_driver_string[] =
53 "Intel(R) Virtual Function Network Driver"; 51 "Intel(R) Virtual Function Network Driver";
54static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -845,7 +843,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
845 } 843 }
846 adapter->net_stats.tx_bytes += total_bytes; 844 adapter->net_stats.tx_bytes += total_bytes;
847 adapter->net_stats.tx_packets += total_packets; 845 adapter->net_stats.tx_packets += total_packets;
848 return (count < tx_ring->count); 846 return count < tx_ring->count;
849} 847}
850 848
851static irqreturn_t igbvf_msix_other(int irq, void *data) 849static irqreturn_t igbvf_msix_other(int irq, void *data)
@@ -1256,7 +1254,7 @@ static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1256 if (!adapter->vlgrp) 1254 if (!adapter->vlgrp)
1257 return; 1255 return;
1258 1256
1259 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1257 for (vid = 0; vid < VLAN_N_VID; vid++) {
1260 if (!vlan_group_get_device(adapter->vlgrp, vid)) 1258 if (!vlan_group_get_device(adapter->vlgrp, vid))
1261 continue; 1259 continue;
1262 igbvf_vlan_rx_add_vid(adapter->netdev, vid); 1260 igbvf_vlan_rx_add_vid(adapter->netdev, vid);
@@ -2904,8 +2902,6 @@ static int __init igbvf_init_module(void)
2904 printk(KERN_INFO "%s\n", igbvf_copyright); 2902 printk(KERN_INFO "%s\n", igbvf_copyright);
2905 2903
2906 ret = pci_register_driver(&igbvf_driver); 2904 ret = pci_register_driver(&igbvf_driver);
2907 pm_qos_add_request(&igbvf_driver_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
2908 PM_QOS_DEFAULT_VALUE);
2909 2905
2910 return ret; 2906 return ret;
2911} 2907}
@@ -2920,7 +2916,6 @@ module_init(igbvf_init_module);
2920static void __exit igbvf_exit_module(void) 2916static void __exit igbvf_exit_module(void)
2921{ 2917{
2922 pci_unregister_driver(&igbvf_driver); 2918 pci_unregister_driver(&igbvf_driver);
2923 pm_qos_remove_request(&igbvf_driver_pm_qos_req);
2924} 2919}
2925module_exit(igbvf_exit_module); 2920module_exit(igbvf_exit_module);
2926 2921
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 48bd5ec9f29b..b626cccbccd1 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -217,7 +217,7 @@ toshoboe_checkfcs (unsigned char *buf, int len)
217 for (i = 0; i < len; ++i) 217 for (i = 0; i < len; ++i)
218 fcs.value = irda_fcs (fcs.value, *(buf++)); 218 fcs.value = irda_fcs (fcs.value, *(buf++));
219 219
220 return (fcs.value == GOOD_FCS); 220 return fcs.value == GOOD_FCS;
221} 221}
222 222
223/***********************************************************************/ 223/***********************************************************************/
@@ -759,7 +759,7 @@ toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
759 if (fir) 759 if (fir)
760 { 760 {
761 memset (buf, 0, TT_LEN); 761 memset (buf, 0, TT_LEN);
762 return (TT_LEN); 762 return TT_LEN;
763 } 763 }
764 764
765 fcs.value = INIT_FCS; 765 fcs.value = INIT_FCS;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 4441fa3389c2..e4ea61944c22 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1124,11 +1124,11 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1124 * The actual image starts after the "STMP" keyword 1124 * The actual image starts after the "STMP" keyword
1125 * so forward to the firmware header tag 1125 * so forward to the firmware header tag
1126 */ 1126 */
1127 for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG) && 1127 for (i = 0; i < fw->size && fw->data[i] !=
1128 (i < fw->size); i++) ; 1128 STIR421X_PATCH_END_OF_HDR_TAG; i++) ;
1129 /* here we check for the out of buffer case */ 1129 /* here we check for the out of buffer case */
1130 if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) && 1130 if (i < STIR421X_PATCH_CODE_OFFSET && i < fw->size &&
1131 (i < STIR421X_PATCH_CODE_OFFSET)) { 1131 STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) {
1132 if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG, 1132 if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
1133 sizeof(STIR421X_PATCH_STMP_TAG) - 1)) { 1133 sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
1134 1134
@@ -1514,7 +1514,7 @@ static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_
1514 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n", 1514 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
1515 __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep); 1515 __func__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
1516 1516
1517 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0)); 1517 return (self->bulk_in_ep != 0) && (self->bulk_out_ep != 0);
1518} 1518}
1519 1519
1520#ifdef IU_DUMP_CLASS_DESC 1520#ifdef IU_DUMP_CLASS_DESC
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index e30cdbb14745..559fe854d76d 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1348,7 +1348,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1348 outb(bank, iobase+BSR); 1348 outb(bank, iobase+BSR);
1349 1349
1350 /* Make sure interrupt handlers keep the proper interrupt mask */ 1350 /* Make sure interrupt handlers keep the proper interrupt mask */
1351 return(ier); 1351 return ier;
1352} 1352}
1353 1353
1354/* 1354/*
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 1b051dab7b29..39d6e6f15d4f 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -336,7 +336,7 @@ static int sirdev_is_receiving(struct sir_dev *dev)
336 if (!atomic_read(&dev->enable_rx)) 336 if (!atomic_read(&dev->enable_rx))
337 return 0; 337 return 0;
338 338
339 return (dev->rx_buff.state != OUTSIDE_FRAME); 339 return dev->rx_buff.state != OUTSIDE_FRAME;
340} 340}
341 341
342int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type) 342int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 850ca1c5ee19..8c57bfb5f098 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -2051,7 +2051,7 @@ static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
2051 */ 2051 */
2052static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self) 2052static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
2053{ 2053{
2054 return (self->rx_buff.state != OUTSIDE_FRAME); 2054 return self->rx_buff.state != OUTSIDE_FRAME;
2055} 2055}
2056 2056
2057 2057
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index e5698fa30a4f..41c96b3d8152 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -219,7 +219,7 @@ static inline int read_reg(struct stir_cb *stir, __u16 reg,
219 219
220static inline int isfir(u32 speed) 220static inline int isfir(u32 speed)
221{ 221{
222 return (speed == 4000000); 222 return speed == 4000000;
223} 223}
224 224
225/* 225/*
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index 5a84822b5a43..c6f58482b769 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -238,7 +238,7 @@ static void WriteLPCReg(int iRegNum, unsigned char iVal)
238 238
239static __u8 ReadReg(unsigned int BaseAddr, int iRegNum) 239static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
240{ 240{
241 return ((__u8) inb(BaseAddr + iRegNum)); 241 return (__u8) inb(BaseAddr + iRegNum);
242} 242}
243 243
244static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal) 244static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index 3f24a1f33022..d66fab854bf1 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -595,7 +595,7 @@ struct ring_descr {
595 595
596static inline int rd_is_active(struct ring_descr *rd) 596static inline int rd_is_active(struct ring_descr *rd)
597{ 597{
598 return ((rd->hw->rd_status & RD_ACTIVE) != 0); 598 return (rd->hw->rd_status & RD_ACTIVE) != 0;
599} 599}
600 600
601static inline void rd_activate(struct ring_descr *rd) 601static inline void rd_activate(struct ring_descr *rd)
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 813993f9c65c..c982ab9f9005 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -296,12 +296,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
296 eecd_reg = IXGB_READ_REG(hw, EECD); 296 eecd_reg = IXGB_READ_REG(hw, EECD);
297 297
298 if (eecd_reg & IXGB_EECD_DO) 298 if (eecd_reg & IXGB_EECD_DO)
299 return (true); 299 return true;
300 300
301 udelay(50); 301 udelay(50);
302 } 302 }
303 ASSERT(0); 303 ASSERT(0);
304 return (false); 304 return false;
305} 305}
306 306
307/****************************************************************************** 307/******************************************************************************
@@ -327,9 +327,9 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
327 checksum += ixgb_read_eeprom(hw, i); 327 checksum += ixgb_read_eeprom(hw, i);
328 328
329 if (checksum == (u16) EEPROM_SUM) 329 if (checksum == (u16) EEPROM_SUM)
330 return (true); 330 return true;
331 else 331 else
332 return (false); 332 return false;
333} 333}
334 334
335/****************************************************************************** 335/******************************************************************************
@@ -439,7 +439,7 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
439 /* End this read operation */ 439 /* End this read operation */
440 ixgb_standby_eeprom(hw); 440 ixgb_standby_eeprom(hw);
441 441
442 return (data); 442 return data;
443} 443}
444 444
445/****************************************************************************** 445/******************************************************************************
@@ -476,16 +476,16 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
476 /* clear the init_ctrl_reg_1 to signify that the cache is 476 /* clear the init_ctrl_reg_1 to signify that the cache is
477 * invalidated */ 477 * invalidated */
478 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 478 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
479 return (false); 479 return false;
480 } 480 }
481 481
482 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 482 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
483 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 483 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
484 pr_debug("Signature invalid\n"); 484 pr_debug("Signature invalid\n");
485 return(false); 485 return false;
486 } 486 }
487 487
488 return(true); 488 return true;
489} 489}
490 490
491/****************************************************************************** 491/******************************************************************************
@@ -505,7 +505,7 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
505 505
506 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 506 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
507 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 507 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
508 return (true); 508 return true;
509 } else { 509 } else {
510 return ixgb_get_eeprom_data(hw); 510 return ixgb_get_eeprom_data(hw);
511 } 511 }
@@ -526,10 +526,10 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
526 526
527 if ((index < IXGB_EEPROM_SIZE) && 527 if ((index < IXGB_EEPROM_SIZE) &&
528 (ixgb_check_and_get_eeprom_data(hw) == true)) { 528 (ixgb_check_and_get_eeprom_data(hw) == true)) {
529 return(hw->eeprom[index]); 529 return hw->eeprom[index];
530 } 530 }
531 531
532 return(0); 532 return 0;
533} 533}
534 534
535/****************************************************************************** 535/******************************************************************************
@@ -570,10 +570,10 @@ u32
570ixgb_get_ee_pba_number(struct ixgb_hw *hw) 570ixgb_get_ee_pba_number(struct ixgb_hw *hw)
571{ 571{
572 if (ixgb_check_and_get_eeprom_data(hw) == true) 572 if (ixgb_check_and_get_eeprom_data(hw) == true)
573 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) 573 return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
574 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16)); 574 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
575 575
576 return(0); 576 return 0;
577} 577}
578 578
579 579
@@ -591,8 +591,8 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
591 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 591 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
592 592
593 if (ixgb_check_and_get_eeprom_data(hw) == true) 593 if (ixgb_check_and_get_eeprom_data(hw) == true)
594 return (le16_to_cpu(ee_map->device_id)); 594 return le16_to_cpu(ee_map->device_id);
595 595
596 return (0); 596 return 0;
597} 597}
598 598
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index a4ed96caae69..43994c199991 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -410,7 +410,7 @@ static int
410ixgb_get_eeprom_len(struct net_device *netdev) 410ixgb_get_eeprom_len(struct net_device *netdev)
411{ 411{
412 /* return size in bytes */ 412 /* return size in bytes */
413 return (IXGB_EEPROM_SIZE << 1); 413 return IXGB_EEPROM_SIZE << 1;
414} 414}
415 415
416static int 416static int
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 397acabccab6..6cb2e42ff4c1 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -167,7 +167,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
167 /* Clear any pending interrupt events. */ 167 /* Clear any pending interrupt events. */
168 icr_reg = IXGB_READ_REG(hw, ICR); 168 icr_reg = IXGB_READ_REG(hw, ICR);
169 169
170 return (ctrl_reg & IXGB_CTRL0_RST); 170 return ctrl_reg & IXGB_CTRL0_RST;
171} 171}
172 172
173 173
@@ -209,7 +209,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
209 xpak_vendor = ixgb_xpak_vendor_infineon; 209 xpak_vendor = ixgb_xpak_vendor_infineon;
210 } 210 }
211 211
212 return (xpak_vendor); 212 return xpak_vendor;
213} 213}
214 214
215/****************************************************************************** 215/******************************************************************************
@@ -273,7 +273,7 @@ ixgb_identify_phy(struct ixgb_hw *hw)
273 if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) 273 if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
274 phy_type = ixgb_phy_type_bcm; 274 phy_type = ixgb_phy_type_bcm;
275 275
276 return (phy_type); 276 return phy_type;
277} 277}
278 278
279/****************************************************************************** 279/******************************************************************************
@@ -366,7 +366,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
366 /* 82597EX errata: Call check-for-link in case lane deskew is locked */ 366 /* 82597EX errata: Call check-for-link in case lane deskew is locked */
367 ixgb_check_for_link(hw); 367 ixgb_check_for_link(hw);
368 368
369 return (status); 369 return status;
370} 370}
371 371
372/****************************************************************************** 372/******************************************************************************
@@ -531,7 +531,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
531 } 531 }
532 532
533 hash_value &= 0xFFF; 533 hash_value &= 0xFFF;
534 return (hash_value); 534 return hash_value;
535} 535}
536 536
537/****************************************************************************** 537/******************************************************************************
@@ -715,7 +715,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
715 } 715 }
716 IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water); 716 IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
717 } 717 }
718 return (status); 718 return status;
719} 719}
720 720
721/****************************************************************************** 721/******************************************************************************
@@ -1140,7 +1140,7 @@ mac_addr_valid(u8 *mac_addr)
1140 pr_debug("MAC address is all zeros\n"); 1140 pr_debug("MAC address is all zeros\n");
1141 is_valid = false; 1141 is_valid = false;
1142 } 1142 }
1143 return (is_valid); 1143 return is_valid;
1144} 1144}
1145 1145
1146/****************************************************************************** 1146/******************************************************************************
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c2f6e71e1181..666207a9c039 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -446,8 +446,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
446 NETIF_F_HW_VLAN_FILTER; 446 NETIF_F_HW_VLAN_FILTER;
447 netdev->features |= NETIF_F_TSO; 447 netdev->features |= NETIF_F_TSO;
448 448
449 if (pci_using_dac) 449 if (pci_using_dac) {
450 netdev->features |= NETIF_F_HIGHDMA; 450 netdev->features |= NETIF_F_HIGHDMA;
451 netdev->vlan_features |= NETIF_F_HIGHDMA;
452 }
451 453
452 /* make sure the EEPROM is good */ 454 /* make sure the EEPROM is good */
453 455
@@ -2221,7 +2223,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2221 2223
2222 if (adapter->vlgrp) { 2224 if (adapter->vlgrp) {
2223 u16 vid; 2225 u16 vid;
2224 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2226 for (vid = 0; vid < VLAN_N_VID; vid++) {
2225 if (!vlan_group_get_device(adapter->vlgrp, vid)) 2227 if (!vlan_group_get_device(adapter->vlgrp, vid))
2226 continue; 2228 continue;
2227 ixgb_vlan_rx_add_vid(adapter->netdev, vid); 2229 ixgb_vlan_rx_add_vid(adapter->netdev, vid);
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 5cebc3755b64..ed8703cfffb7 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -28,10 +28,13 @@
28#ifndef _IXGBE_H_ 28#ifndef _IXGBE_H_
29#define _IXGBE_H_ 29#define _IXGBE_H_
30 30
31#include <linux/bitops.h>
31#include <linux/types.h> 32#include <linux/types.h>
32#include <linux/pci.h> 33#include <linux/pci.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
35#include <linux/cpumask.h>
34#include <linux/aer.h> 36#include <linux/aer.h>
37#include <linux/if_vlan.h>
35 38
36#include "ixgbe_type.h" 39#include "ixgbe_type.h"
37#include "ixgbe_common.h" 40#include "ixgbe_common.h"
@@ -179,8 +182,9 @@ struct ixgbe_ring {
179 */ 182 */
180 183
181 struct ixgbe_queue_stats stats; 184 struct ixgbe_queue_stats stats;
182 unsigned long reinit_state; 185 struct u64_stats_sync syncp;
183 int numa_node; 186 int numa_node;
187 unsigned long reinit_state;
184 u64 rsc_count; /* stat for coalesced packets */ 188 u64 rsc_count; /* stat for coalesced packets */
185 u64 rsc_flush; /* stats for flushed packets */ 189 u64 rsc_flush; /* stats for flushed packets */
186 u32 restart_queue; /* track tx queue restarts */ 190 u32 restart_queue; /* track tx queue restarts */
@@ -241,6 +245,7 @@ struct ixgbe_q_vector {
241 u8 tx_itr; 245 u8 tx_itr;
242 u8 rx_itr; 246 u8 rx_itr;
243 u32 eitr; 247 u32 eitr;
248 cpumask_var_t affinity_mask;
244}; 249};
245 250
246/* Helper macros to switch between ints/sec and what the register uses. 251/* Helper macros to switch between ints/sec and what the register uses.
@@ -285,7 +290,7 @@ struct ixgbe_q_vector {
285/* board specific private data structure */ 290/* board specific private data structure */
286struct ixgbe_adapter { 291struct ixgbe_adapter {
287 struct timer_list watchdog_timer; 292 struct timer_list watchdog_timer;
288 struct vlan_group *vlgrp; 293 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
289 u16 bd_number; 294 u16 bd_number;
290 struct work_struct reset_task; 295 struct work_struct reset_task;
291 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 296 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 3e06a61da921..0bd8fbb5bfd0 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,20 +39,20 @@
39#define IXGBE_82599_MC_TBL_SIZE 128 39#define IXGBE_82599_MC_TBL_SIZE 128
40#define IXGBE_82599_VFT_TBL_SIZE 128 40#define IXGBE_82599_VFT_TBL_SIZE 128
41 41
42void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 42static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 43static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
44void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 44static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
45s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 45static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
46 ixgbe_link_speed speed, 46 ixgbe_link_speed speed,
47 bool autoneg, 47 bool autoneg,
48 bool autoneg_wait_to_complete); 48 bool autoneg_wait_to_complete);
49static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 49static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
50 ixgbe_link_speed speed, 50 ixgbe_link_speed speed,
51 bool autoneg, 51 bool autoneg,
52 bool autoneg_wait_to_complete); 52 bool autoneg_wait_to_complete);
53s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 53static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
54 bool autoneg_wait_to_complete); 54 bool autoneg_wait_to_complete);
55s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 55static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
56 ixgbe_link_speed speed, 56 ixgbe_link_speed speed,
57 bool autoneg, 57 bool autoneg,
58 bool autoneg_wait_to_complete); 58 bool autoneg_wait_to_complete);
@@ -369,7 +369,7 @@ out:
369 * Configures link settings based on values in the ixgbe_hw struct. 369 * Configures link settings based on values in the ixgbe_hw struct.
370 * Restarts the link. Performs autonegotiation if needed. 370 * Restarts the link. Performs autonegotiation if needed.
371 **/ 371 **/
372s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 372static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
373 bool autoneg_wait_to_complete) 373 bool autoneg_wait_to_complete)
374{ 374{
375 u32 autoc_reg; 375 u32 autoc_reg;
@@ -418,7 +418,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
418 * PHY states. This includes selectively shutting down the Tx 418 * PHY states. This includes selectively shutting down the Tx
419 * laser on the PHY, effectively halting physical link. 419 * laser on the PHY, effectively halting physical link.
420 **/ 420 **/
421void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 421static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
422{ 422{
423 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 423 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
424 424
@@ -437,7 +437,7 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
437 * PHY states. This includes selectively turning on the Tx 437 * PHY states. This includes selectively turning on the Tx
438 * laser on the PHY, effectively starting physical link. 438 * laser on the PHY, effectively starting physical link.
439 **/ 439 **/
440void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 440static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
441{ 441{
442 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 442 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
443 443
@@ -460,7 +460,7 @@ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
460 * end. This is consistent with true clause 37 autoneg, which also 460 * end. This is consistent with true clause 37 autoneg, which also
461 * involves a loss of signal. 461 * involves a loss of signal.
462 **/ 462 **/
463void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 463static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
464{ 464{
465 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); 465 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
466 466
@@ -729,7 +729,7 @@ out:
729 * 729 *
730 * Set the link speed in the AUTOC register and restarts link. 730 * Set the link speed in the AUTOC register and restarts link.
731 **/ 731 **/
732s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 732static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
733 ixgbe_link_speed speed, bool autoneg, 733 ixgbe_link_speed speed, bool autoneg,
734 bool autoneg_wait_to_complete) 734 bool autoneg_wait_to_complete)
735{ 735{
@@ -1415,92 +1415,6 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1415} 1415}
1416 1416
1417/** 1417/**
1418 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
1419 * @input: input stream to modify
1420 * @src_addr_1: the first 4 bytes of the IP address to load
1421 * @src_addr_2: the second 4 bytes of the IP address to load
1422 * @src_addr_3: the third 4 bytes of the IP address to load
1423 * @src_addr_4: the fourth 4 bytes of the IP address to load
1424 **/
1425s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
1426 u32 src_addr_1, u32 src_addr_2,
1427 u32 src_addr_3, u32 src_addr_4)
1428{
1429 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
1430 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
1431 (src_addr_4 >> 8) & 0xff;
1432 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
1433 (src_addr_4 >> 16) & 0xff;
1434 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;
1435
1436 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
1437 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
1438 (src_addr_3 >> 8) & 0xff;
1439 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
1440 (src_addr_3 >> 16) & 0xff;
1441 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;
1442
1443 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
1444 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
1445 (src_addr_2 >> 8) & 0xff;
1446 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
1447 (src_addr_2 >> 16) & 0xff;
1448 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;
1449
1450 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
1451 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
1452 (src_addr_1 >> 8) & 0xff;
1453 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
1454 (src_addr_1 >> 16) & 0xff;
1455 input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;
1456
1457 return 0;
1458}
1459
1460/**
1461 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
1462 * @input: input stream to modify
1463 * @dst_addr_1: the first 4 bytes of the IP address to load
1464 * @dst_addr_2: the second 4 bytes of the IP address to load
1465 * @dst_addr_3: the third 4 bytes of the IP address to load
1466 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1467 **/
1468s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
1469 u32 dst_addr_1, u32 dst_addr_2,
1470 u32 dst_addr_3, u32 dst_addr_4)
1471{
1472 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
1473 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
1474 (dst_addr_4 >> 8) & 0xff;
1475 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
1476 (dst_addr_4 >> 16) & 0xff;
1477 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;
1478
1479 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
1480 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
1481 (dst_addr_3 >> 8) & 0xff;
1482 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
1483 (dst_addr_3 >> 16) & 0xff;
1484 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;
1485
1486 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
1487 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
1488 (dst_addr_2 >> 8) & 0xff;
1489 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
1490 (dst_addr_2 >> 16) & 0xff;
1491 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;
1492
1493 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
1494 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
1495 (dst_addr_1 >> 8) & 0xff;
1496 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
1497 (dst_addr_1 >> 16) & 0xff;
1498 input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;
1499
1500 return 0;
1501}
1502
1503/**
1504 * ixgbe_atr_set_src_port_82599 - Sets the source port 1418 * ixgbe_atr_set_src_port_82599 - Sets the source port
1505 * @input: input stream to modify 1419 * @input: input stream to modify
1506 * @src_port: the source port to load 1420 * @src_port: the source port to load
@@ -1540,19 +1454,6 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1540} 1454}
1541 1455
1542/** 1456/**
1543 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
1544 * @input: input stream to modify
1545 * @vm_pool: the Virtual Machine pool to load
1546 **/
1547s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
1548 u8 vm_pool)
1549{
1550 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
1551
1552 return 0;
1553}
1554
1555/**
1556 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type 1457 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1557 * @input: input stream to modify 1458 * @input: input stream to modify
1558 * @l4type: the layer 4 type value to load 1459 * @l4type: the layer 4 type value to load
@@ -1645,41 +1546,6 @@ static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1645} 1546}
1646 1547
1647/** 1548/**
1648 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address
1649 * @input: input stream to search
1650 * @dst_addr_1: the first 4 bytes of the IP address to load
1651 * @dst_addr_2: the second 4 bytes of the IP address to load
1652 * @dst_addr_3: the third 4 bytes of the IP address to load
1653 * @dst_addr_4: the fourth 4 bytes of the IP address to load
1654 **/
1655s32 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input,
1656 u32 *dst_addr_1, u32 *dst_addr_2,
1657 u32 *dst_addr_3, u32 *dst_addr_4)
1658{
1659 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12];
1660 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8;
1661 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16;
1662 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24;
1663
1664 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8];
1665 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8;
1666 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16;
1667 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24;
1668
1669 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4];
1670 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8;
1671 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16;
1672 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24;
1673
1674 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET];
1675 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8;
1676 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16;
1677 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24;
1678
1679 return 0;
1680}
1681
1682/**
1683 * ixgbe_atr_get_src_port_82599 - Gets the source port 1549 * ixgbe_atr_get_src_port_82599 - Gets the source port
1684 * @input: input stream to modify 1550 * @input: input stream to modify
1685 * @src_port: the source port to load 1551 * @src_port: the source port to load
@@ -1732,19 +1598,6 @@ static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
1732} 1598}
1733 1599
1734/** 1600/**
1735 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool
1736 * @input: input stream to modify
1737 * @vm_pool: the Virtual Machine pool to load
1738 **/
1739s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input,
1740 u8 *vm_pool)
1741{
1742 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET];
1743
1744 return 0;
1745}
1746
1747/**
1748 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1601 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
1749 * @input: input stream to modify 1602 * @input: input stream to modify
1750 * @l4type: the layer 4 type value to load 1603 * @l4type: the layer 4 type value to load
@@ -1910,56 +1763,27 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1910 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); 1763 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1911 1764
1912 /* 1765 /*
1913 * Program the relevant mask registers. If src/dst_port or src/dst_addr 1766 * Program the relevant mask registers. L4type cannot be
1914 * are zero, then assume a full mask for that field. Also assume that 1767 * masked out in this implementation.
1915 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1916 * cannot be masked out in this implementation.
1917 * 1768 *
1918 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1769 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1919 * point in time. 1770 * point in time.
1920 */ 1771 */
1921 if (src_ipv4 == 0) 1772 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
1922 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff); 1773 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
1923 else
1924 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
1925
1926 if (dst_ipv4 == 0)
1927 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
1928 else
1929 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
1930 1774
1931 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1775 switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
1932 case IXGBE_ATR_L4TYPE_TCP: 1776 case IXGBE_ATR_L4TYPE_TCP:
1933 if (src_port == 0) 1777 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
1934 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff); 1778 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1935 else 1779 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1936 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 1780 (input_masks->dst_port_mask << 16)));
1937 input_masks->src_port_mask);
1938
1939 if (dst_port == 0)
1940 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1941 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1942 (0xffff << 16)));
1943 else
1944 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
1945 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
1946 (input_masks->dst_port_mask << 16)));
1947 break; 1781 break;
1948 case IXGBE_ATR_L4TYPE_UDP: 1782 case IXGBE_ATR_L4TYPE_UDP:
1949 if (src_port == 0) 1783 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
1950 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff); 1784 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1951 else 1785 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1952 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 1786 (input_masks->src_port_mask << 16)));
1953 input_masks->src_port_mask);
1954
1955 if (dst_port == 0)
1956 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1957 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1958 (0xffff << 16)));
1959 else
1960 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
1961 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1962 (input_masks->src_port_mask << 16)));
1963 break; 1787 break;
1964 default: 1788 default:
1965 /* this already would have failed above */ 1789 /* this already would have failed above */
@@ -1967,11 +1791,11 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1967 } 1791 }
1968 1792
1969 /* Program the last mask register, FDIRM */ 1793 /* Program the last mask register, FDIRM */
1970 if (input_masks->vlan_id_mask || !vlan_id) 1794 if (input_masks->vlan_id_mask)
1971 /* Mask both VLAN and VLANP - bits 0 and 1 */ 1795 /* Mask both VLAN and VLANP - bits 0 and 1 */
1972 fdirm |= 0x3; 1796 fdirm |= 0x3;
1973 1797
1974 if (input_masks->data_mask || !flex_bytes) 1798 if (input_masks->data_mask)
1975 /* Flex bytes need masking, so mask the whole thing - bit 4 */ 1799 /* Flex bytes need masking, so mask the whole thing - bit 4 */
1976 fdirm |= 0x10; 1800 fdirm |= 0x10;
1977 1801
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 9595b1bfb8dd..e3eca1316389 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -52,6 +52,7 @@ static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
55static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
55 56
56/** 57/**
57 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 58 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -637,7 +638,7 @@ out:
637 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 638 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
638 * read or write is done respectively. 639 * read or write is done respectively.
639 **/ 640 **/
640s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 641static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
641{ 642{
642 u32 i; 643 u32 i;
643 u32 reg; 644 u32 reg;
@@ -2449,7 +2450,7 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2449 * return the VLVF index where this VLAN id should be placed 2450 * return the VLVF index where this VLAN id should be placed
2450 * 2451 *
2451 **/ 2452 **/
2452s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) 2453static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
2453{ 2454{
2454 u32 bits = 0; 2455 u32 bits = 0;
2455 u32 first_empty_slot = 0; 2456 u32 first_empty_slot = 0;
@@ -2704,48 +2705,3 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2704 2705
2705 return 0; 2706 return 0;
2706} 2707}
2707
2708/**
2709 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
2710 * the EEPROM
2711 * @hw: pointer to hardware structure
2712 * @wwnn_prefix: the alternative WWNN prefix
2713 * @wwpn_prefix: the alternative WWPN prefix
2714 *
2715 * This function will read the EEPROM from the alternative SAN MAC address
2716 * block to check the support for the alternative WWNN/WWPN prefix support.
2717 **/
2718s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2719 u16 *wwpn_prefix)
2720{
2721 u16 offset, caps;
2722 u16 alt_san_mac_blk_offset;
2723
2724 /* clear output first */
2725 *wwnn_prefix = 0xFFFF;
2726 *wwpn_prefix = 0xFFFF;
2727
2728 /* check if alternative SAN MAC is supported */
2729 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2730 &alt_san_mac_blk_offset);
2731
2732 if ((alt_san_mac_blk_offset == 0) ||
2733 (alt_san_mac_blk_offset == 0xFFFF))
2734 goto wwn_prefix_out;
2735
2736 /* check capability in alternative san mac address block */
2737 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2738 hw->eeprom.ops.read(hw, offset, &caps);
2739 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2740 goto wwn_prefix_out;
2741
2742 /* get the corresponding prefix for WWNN/WWPN */
2743 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2744 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2745
2746 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2747 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2748
2749wwn_prefix_out:
2750 return 0;
2751}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 5cf15aa11cac..424c223437dc 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -52,7 +52,6 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
53 u16 *checksum_val); 53 u16 *checksum_val);
54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
55s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
56 55
57s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 56s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
58 u32 enable_addr); 57 u32 enable_addr);
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 9aea4f04bbd2..8bb9ddb6dffe 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -34,98 +34,6 @@
34#include "ixgbe_dcb_82599.h" 34#include "ixgbe_dcb_82599.h"
35 35
36/** 36/**
37 * ixgbe_dcb_config - Struct containing DCB settings.
38 * @dcb_config: Pointer to DCB config structure
39 *
40 * This function checks DCB rules for DCB settings.
41 * The following rules are checked:
42 * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
43 * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
44 * Group must total 100.
45 * 3. A Traffic Class should not be set to both Link Strict Priority
46 * and Group Strict Priority.
47 * 4. Link strict Bandwidth Groups can only have link strict traffic classes
48 * with zero bandwidth.
49 */
50s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
51{
52 struct tc_bw_alloc *p;
53 s32 ret_val = 0;
54 u8 i, j, bw = 0, bw_id;
55 u8 bw_sum[2][MAX_BW_GROUP];
56 bool link_strict[2][MAX_BW_GROUP];
57
58 memset(bw_sum, 0, sizeof(bw_sum));
59 memset(link_strict, 0, sizeof(link_strict));
60
61 /* First Tx, then Rx */
62 for (i = 0; i < 2; i++) {
63 /* Check each traffic class for rule violation */
64 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
65 p = &dcb_config->tc_config[j].path[i];
66
67 bw = p->bwg_percent;
68 bw_id = p->bwg_id;
69
70 if (bw_id >= MAX_BW_GROUP) {
71 ret_val = DCB_ERR_CONFIG;
72 goto err_config;
73 }
74 if (p->prio_type == prio_link) {
75 link_strict[i][bw_id] = true;
76 /* Link strict should have zero bandwidth */
77 if (bw) {
78 ret_val = DCB_ERR_LS_BW_NONZERO;
79 goto err_config;
80 }
81 } else if (!bw) {
82 /*
83 * Traffic classes without link strict
84 * should have non-zero bandwidth.
85 */
86 ret_val = DCB_ERR_TC_BW_ZERO;
87 goto err_config;
88 }
89 bw_sum[i][bw_id] += bw;
90 }
91
92 bw = 0;
93
94 /* Check each bandwidth group for rule violation */
95 for (j = 0; j < MAX_BW_GROUP; j++) {
96 bw += dcb_config->bw_percentage[i][j];
97 /*
98 * Sum of bandwidth percentages of all traffic classes
99 * within a Bandwidth Group must total 100 except for
100 * link strict group (zero bandwidth).
101 */
102 if (link_strict[i][j]) {
103 if (bw_sum[i][j]) {
104 /*
105 * Link strict group should have zero
106 * bandwidth.
107 */
108 ret_val = DCB_ERR_LS_BWG_NONZERO;
109 goto err_config;
110 }
111 } else if (bw_sum[i][j] != BW_PERCENT &&
112 bw_sum[i][j] != 0) {
113 ret_val = DCB_ERR_TC_BW;
114 goto err_config;
115 }
116 }
117
118 if (bw != BW_PERCENT) {
119 ret_val = DCB_ERR_BW_GROUP;
120 goto err_config;
121 }
122 }
123
124err_config:
125 return ret_val;
126}
127
128/**
129 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits 37 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
130 * @ixgbe_dcb_config: Struct containing DCB settings. 38 * @ixgbe_dcb_config: Struct containing DCB settings.
131 * @direction: Configuring either Tx or Rx. 39 * @direction: Configuring either Tx or Rx.
@@ -203,133 +111,6 @@ out:
203} 111}
204 112
205/** 113/**
206 * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
207 * @hw: pointer to hardware structure
208 * @stats: pointer to statistics structure
209 * @tc_count: Number of elements in bwg_array.
210 *
211 * This function returns the status data for each of the Traffic Classes in use.
212 */
213s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
214 u8 tc_count)
215{
216 s32 ret = 0;
217 if (hw->mac.type == ixgbe_mac_82598EB)
218 ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
219 else if (hw->mac.type == ixgbe_mac_82599EB)
220 ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
221 return ret;
222}
223
224/**
225 * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
226 * hw - pointer to hardware structure
227 * stats - pointer to statistics structure
228 * tc_count - Number of elements in bwg_array.
229 *
230 * This function returns the CBFC status data for each of the Traffic Classes.
231 */
232s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
233 u8 tc_count)
234{
235 s32 ret = 0;
236 if (hw->mac.type == ixgbe_mac_82598EB)
237 ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
238 else if (hw->mac.type == ixgbe_mac_82599EB)
239 ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
240 return ret;
241}
242
243/**
244 * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
245 * @hw: pointer to hardware structure
246 * @dcb_config: pointer to ixgbe_dcb_config structure
247 *
248 * Configure Rx Data Arbiter and credits for each traffic class.
249 */
250s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
251 struct ixgbe_dcb_config *dcb_config)
252{
253 s32 ret = 0;
254 if (hw->mac.type == ixgbe_mac_82598EB)
255 ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
256 else if (hw->mac.type == ixgbe_mac_82599EB)
257 ret = ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
258 return ret;
259}
260
261/**
262 * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
263 * @hw: pointer to hardware structure
264 * @dcb_config: pointer to ixgbe_dcb_config structure
265 *
266 * Configure Tx Descriptor Arbiter and credits for each traffic class.
267 */
268s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
269 struct ixgbe_dcb_config *dcb_config)
270{
271 s32 ret = 0;
272 if (hw->mac.type == ixgbe_mac_82598EB)
273 ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
274 else if (hw->mac.type == ixgbe_mac_82599EB)
275 ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
276 return ret;
277}
278
279/**
280 * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
281 * @hw: pointer to hardware structure
282 * @dcb_config: pointer to ixgbe_dcb_config structure
283 *
284 * Configure Tx Data Arbiter and credits for each traffic class.
285 */
286s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
287 struct ixgbe_dcb_config *dcb_config)
288{
289 s32 ret = 0;
290 if (hw->mac.type == ixgbe_mac_82598EB)
291 ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
292 else if (hw->mac.type == ixgbe_mac_82599EB)
293 ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
294 return ret;
295}
296
297/**
298 * ixgbe_dcb_config_pfc - Config priority flow control
299 * @hw: pointer to hardware structure
300 * @dcb_config: pointer to ixgbe_dcb_config structure
301 *
302 * Configure Priority Flow Control for each traffic class.
303 */
304s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
305 struct ixgbe_dcb_config *dcb_config)
306{
307 s32 ret = 0;
308 if (hw->mac.type == ixgbe_mac_82598EB)
309 ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
310 else if (hw->mac.type == ixgbe_mac_82599EB)
311 ret = ixgbe_dcb_config_pfc_82599(hw, dcb_config);
312 return ret;
313}
314
315/**
316 * ixgbe_dcb_config_tc_stats - Config traffic class statistics
317 * @hw: pointer to hardware structure
318 *
319 * Configure queue statistics registers, all queues belonging to same traffic
320 * class uses a single set of queue statistics counters.
321 */
322s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
323{
324 s32 ret = 0;
325 if (hw->mac.type == ixgbe_mac_82598EB)
326 ret = ixgbe_dcb_config_tc_stats_82598(hw);
327 else if (hw->mac.type == ixgbe_mac_82599EB)
328 ret = ixgbe_dcb_config_tc_stats_82599(hw);
329 return ret;
330}
331
332/**
333 * ixgbe_dcb_hw_config - Config and enable DCB 114 * ixgbe_dcb_hw_config - Config and enable DCB
334 * @hw: pointer to hardware structure 115 * @hw: pointer to hardware structure
335 * @dcb_config: pointer to ixgbe_dcb_config structure 116 * @dcb_config: pointer to ixgbe_dcb_config structure
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 5caafd4afbc3..eb1059f09da0 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -149,27 +149,9 @@ struct ixgbe_dcb_config {
149 149
150/* DCB driver APIs */ 150/* DCB driver APIs */
151 151
152/* DCB rule checking function.*/
153s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
154
155/* DCB credits calculation */ 152/* DCB credits calculation */
156s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); 153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
157 154
158/* DCB PFC functions */
159s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g);
160s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
161
162/* DCB traffic class stats */
163s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
164s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
165
166/* DCB config arbiters */
167s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
168 struct ixgbe_dcb_config *);
169s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
170 struct ixgbe_dcb_config *);
171s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
172
173/* DCB hw initialization */ 155/* DCB hw initialization */
174s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 156s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
175 157
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index f0e9279d4669..50288bcadc59 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -32,65 +32,6 @@
32#include "ixgbe_dcb_82598.h" 32#include "ixgbe_dcb_82598.h"
33 33
34/** 34/**
35 * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
36 * @hw: pointer to hardware structure
37 * @stats: pointer to statistics structure
38 * @tc_count: Number of elements in bwg_array.
39 *
40 * This function returns the status data for each of the Traffic Classes in use.
41 */
42s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
43 struct ixgbe_hw_stats *stats,
44 u8 tc_count)
45{
46 int tc;
47
48 if (tc_count > MAX_TRAFFIC_CLASS)
49 return DCB_ERR_PARAM;
50
51 /* Statistics pertaining to each traffic class */
52 for (tc = 0; tc < tc_count; tc++) {
53 /* Transmitted Packets */
54 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
55 /* Transmitted Bytes */
56 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
57 /* Received Packets */
58 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
59 /* Received Bytes */
60 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
61 }
62
63 return 0;
64}
65
66/**
67 * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
68 * @hw: pointer to hardware structure
69 * @stats: pointer to statistics structure
70 * @tc_count: Number of elements in bwg_array.
71 *
72 * This function returns the CBFC status data for each of the Traffic Classes.
73 */
74s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
75 struct ixgbe_hw_stats *stats,
76 u8 tc_count)
77{
78 int tc;
79
80 if (tc_count > MAX_TRAFFIC_CLASS)
81 return DCB_ERR_PARAM;
82
83 for (tc = 0; tc < tc_count; tc++) {
84 /* Priority XOFF Transmitted */
85 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
86 /* Priority XOFF Received */
87 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
88 }
89
90 return 0;
91}
92
93/**
94 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers 35 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
95 * @hw: pointer to hardware structure 36 * @hw: pointer to hardware structure
96 * @dcb_config: pointer to ixgbe_dcb_config structure 37 * @dcb_config: pointer to ixgbe_dcb_config structure
@@ -137,7 +78,7 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
137 * 78 *
138 * Configure Rx Data Arbiter and credits for each traffic class. 79 * Configure Rx Data Arbiter and credits for each traffic class.
139 */ 80 */
140s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, 81static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
141 struct ixgbe_dcb_config *dcb_config) 82 struct ixgbe_dcb_config *dcb_config)
142{ 83{
143 struct tc_bw_alloc *p; 84 struct tc_bw_alloc *p;
@@ -194,7 +135,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
194 * 135 *
195 * Configure Tx Descriptor Arbiter and credits for each traffic class. 136 * Configure Tx Descriptor Arbiter and credits for each traffic class.
196 */ 137 */
197s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, 138static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
198 struct ixgbe_dcb_config *dcb_config) 139 struct ixgbe_dcb_config *dcb_config)
199{ 140{
200 struct tc_bw_alloc *p; 141 struct tc_bw_alloc *p;
@@ -242,7 +183,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
242 * 183 *
243 * Configure Tx Data Arbiter and credits for each traffic class. 184 * Configure Tx Data Arbiter and credits for each traffic class.
244 */ 185 */
245s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, 186static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config) 187 struct ixgbe_dcb_config *dcb_config)
247{ 188{
248 struct tc_bw_alloc *p; 189 struct tc_bw_alloc *p;
@@ -355,7 +296,7 @@ out:
355 * Configure queue statistics registers, all queues belonging to same traffic 296 * Configure queue statistics registers, all queues belonging to same traffic
356 * class uses a single set of queue statistics counters. 297 * class uses a single set of queue statistics counters.
357 */ 298 */
358s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) 299static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
359{ 300{
360 u32 reg = 0; 301 u32 reg = 0;
361 u8 i = 0; 302 u8 i = 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index cc728fa092e2..abc03ccfa088 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -72,21 +72,6 @@
72 72
73/* DCB PFC functions */ 73/* DCB PFC functions */
74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 74s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
75s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
76 u8);
77
78/* DCB traffic class stats */
79s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
80s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
81 u8);
82
83/* DCB config arbiters */
84s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *,
85 struct ixgbe_dcb_config *);
86s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *,
87 struct ixgbe_dcb_config *);
88s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *,
89 struct ixgbe_dcb_config *);
90 75
91/* DCB hw initialization */ 76/* DCB hw initialization */
92s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); 77s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 25b02fb425ac..67c219f86c3a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -31,70 +31,13 @@
31#include "ixgbe_dcb_82599.h" 31#include "ixgbe_dcb_82599.h"
32 32
33/** 33/**
34 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
35 * @hw: pointer to hardware structure
36 * @stats: pointer to statistics structure
37 * @tc_count: Number of elements in bwg_array.
38 *
39 * This function returns the status data for each of the Traffic Classes in use.
40 */
41s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
42 struct ixgbe_hw_stats *stats,
43 u8 tc_count)
44{
45 int tc;
46
47 if (tc_count > MAX_TRAFFIC_CLASS)
48 return DCB_ERR_PARAM;
49 /* Statistics pertaining to each traffic class */
50 for (tc = 0; tc < tc_count; tc++) {
51 /* Transmitted Packets */
52 stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
53 /* Transmitted Bytes */
54 stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
55 /* Received Packets */
56 stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
57 /* Received Bytes */
58 stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
59 }
60
61 return 0;
62}
63
64/**
65 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
66 * @hw: pointer to hardware structure
67 * @stats: pointer to statistics structure
68 * @tc_count: Number of elements in bwg_array.
69 *
70 * This function returns the CBFC status data for each of the Traffic Classes.
71 */
72s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
73 struct ixgbe_hw_stats *stats,
74 u8 tc_count)
75{
76 int tc;
77
78 if (tc_count > MAX_TRAFFIC_CLASS)
79 return DCB_ERR_PARAM;
80 for (tc = 0; tc < tc_count; tc++) {
81 /* Priority XOFF Transmitted */
82 stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
83 /* Priority XOFF Received */
84 stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
85 }
86
87 return 0;
88}
89
90/**
91 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers 34 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
92 * @hw: pointer to hardware structure 35 * @hw: pointer to hardware structure
93 * @dcb_config: pointer to ixgbe_dcb_config structure 36 * @dcb_config: pointer to ixgbe_dcb_config structure
94 * 37 *
95 * Configure packet buffers for DCB mode. 38 * Configure packet buffers for DCB mode.
96 */ 39 */
97s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, 40static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
98 struct ixgbe_dcb_config *dcb_config) 41 struct ixgbe_dcb_config *dcb_config)
99{ 42{
100 s32 ret_val = 0; 43 s32 ret_val = 0;
@@ -136,7 +79,7 @@ s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
136 * 79 *
137 * Configure Rx Packet Arbiter and credits for each traffic class. 80 * Configure Rx Packet Arbiter and credits for each traffic class.
138 */ 81 */
139s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, 82static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
140 struct ixgbe_dcb_config *dcb_config) 83 struct ixgbe_dcb_config *dcb_config)
141{ 84{
142 struct tc_bw_alloc *p; 85 struct tc_bw_alloc *p;
@@ -191,7 +134,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
191 * 134 *
192 * Configure Tx Descriptor Arbiter and credits for each traffic class. 135 * Configure Tx Descriptor Arbiter and credits for each traffic class.
193 */ 136 */
194s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, 137static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
195 struct ixgbe_dcb_config *dcb_config) 138 struct ixgbe_dcb_config *dcb_config)
196{ 139{
197 struct tc_bw_alloc *p; 140 struct tc_bw_alloc *p;
@@ -238,7 +181,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
238 * 181 *
239 * Configure Tx Packet Arbiter and credits for each traffic class. 182 * Configure Tx Packet Arbiter and credits for each traffic class.
240 */ 183 */
241s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, 184static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
242 struct ixgbe_dcb_config *dcb_config) 185 struct ixgbe_dcb_config *dcb_config)
243{ 186{
244 struct tc_bw_alloc *p; 187 struct tc_bw_alloc *p;
@@ -359,7 +302,7 @@ out:
359 * Configure queue statistics registers, all queues belonging to same traffic 302 * Configure queue statistics registers, all queues belonging to same traffic
360 * class uses a single set of queue statistics counters. 303 * class uses a single set of queue statistics counters.
361 */ 304 */
362s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) 305static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
363{ 306{
364 u32 reg = 0; 307 u32 reg = 0;
365 u8 i = 0; 308 u8 i = 0;
@@ -412,7 +355,7 @@ s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
412 * 355 *
413 * Configure general DCB parameters. 356 * Configure general DCB parameters.
414 */ 357 */
415s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) 358static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
416{ 359{
417 u32 reg; 360 u32 reg;
418 u32 q; 361 u32 q;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 0f3f791e1e1d..18d7fbf6c292 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -101,24 +101,6 @@
101/* DCB PFC functions */ 101/* DCB PFC functions */
102s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, 102s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
103 struct ixgbe_dcb_config *dcb_config); 103 struct ixgbe_dcb_config *dcb_config);
104s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
105 struct ixgbe_hw_stats *stats,
106 u8 tc_count);
107
108/* DCB traffic class stats */
109s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw);
110s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
111 struct ixgbe_hw_stats *stats,
112 u8 tc_count);
113
114/* DCB config arbiters */
115s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
116 struct ixgbe_dcb_config *dcb_config);
117s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
118 struct ixgbe_dcb_config *dcb_config);
119s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
120 struct ixgbe_dcb_config *dcb_config);
121
122 104
123/* DCB hw initialization */ 105/* DCB hw initialization */
124s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, 106s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 25ef8b197373..3dc731c22ff2 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -401,7 +401,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
401static u32 ixgbe_get_rx_csum(struct net_device *netdev) 401static u32 ixgbe_get_rx_csum(struct net_device *netdev)
402{ 402{
403 struct ixgbe_adapter *adapter = netdev_priv(netdev); 403 struct ixgbe_adapter *adapter = netdev_priv(netdev);
404 return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED); 404 return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
405} 405}
406 406
407static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) 407static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
@@ -988,8 +988,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
988 case ETH_SS_STATS: 988 case ETH_SS_STATS:
989 return IXGBE_STATS_LEN; 989 return IXGBE_STATS_LEN;
990 case ETH_SS_NTUPLE_FILTERS: 990 case ETH_SS_NTUPLE_FILTERS:
991 return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY * 991 return ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
992 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY); 992 ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY;
993 default: 993 default:
994 return -EOPNOTSUPP; 994 return -EOPNOTSUPP;
995 } 995 }
@@ -999,12 +999,11 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
999 struct ethtool_stats *stats, u64 *data) 999 struct ethtool_stats *stats, u64 *data)
1000{ 1000{
1001 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1001 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1002 u64 *queue_stat;
1003 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
1004 struct rtnl_link_stats64 temp; 1002 struct rtnl_link_stats64 temp;
1005 const struct rtnl_link_stats64 *net_stats; 1003 const struct rtnl_link_stats64 *net_stats;
1006 int j, k; 1004 unsigned int start;
1007 int i; 1005 struct ixgbe_ring *ring;
1006 int i, j;
1008 char *p = NULL; 1007 char *p = NULL;
1009 1008
1010 ixgbe_update_stats(adapter); 1009 ixgbe_update_stats(adapter);
@@ -1025,16 +1024,22 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1025 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1024 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1026 } 1025 }
1027 for (j = 0; j < adapter->num_tx_queues; j++) { 1026 for (j = 0; j < adapter->num_tx_queues; j++) {
1028 queue_stat = (u64 *)&adapter->tx_ring[j]->stats; 1027 ring = adapter->tx_ring[j];
1029 for (k = 0; k < stat_count; k++) 1028 do {
1030 data[i + k] = queue_stat[k]; 1029 start = u64_stats_fetch_begin_bh(&ring->syncp);
1031 i += k; 1030 data[i] = ring->stats.packets;
1031 data[i+1] = ring->stats.bytes;
1032 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1033 i += 2;
1032 } 1034 }
1033 for (j = 0; j < adapter->num_rx_queues; j++) { 1035 for (j = 0; j < adapter->num_rx_queues; j++) {
1034 queue_stat = (u64 *)&adapter->rx_ring[j]->stats; 1036 ring = adapter->rx_ring[j];
1035 for (k = 0; k < stat_count; k++) 1037 do {
1036 data[i + k] = queue_stat[k]; 1038 start = u64_stats_fetch_begin_bh(&ring->syncp);
1037 i += k; 1039 data[i] = ring->stats.packets;
1040 data[i+1] = ring->stats.bytes;
1041 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1042 i += 2;
1038 } 1043 }
1039 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1044 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1040 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { 1045 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
@@ -2113,7 +2118,17 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2113 bool need_reset = false; 2118 bool need_reset = false;
2114 int rc; 2119 int rc;
2115 2120
2116 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE); 2121#ifdef CONFIG_IXGBE_DCB
2122 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
2123 !(data & ETH_FLAG_RXVLAN))
2124 return -EINVAL;
2125#endif
2126
2127 need_reset = (data & ETH_FLAG_RXVLAN) !=
2128 (netdev->features & NETIF_F_HW_VLAN_RX);
2129
2130 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO |
2131 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
2117 if (rc) 2132 if (rc)
2118 return rc; 2133 return rc;
2119 2134
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 2f1de8b90f9e..05efa6a8ce8e 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -604,11 +604,13 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
604{ 604{
605 int rc = -EINVAL; 605 int rc = -EINVAL;
606 struct ixgbe_adapter *adapter = netdev_priv(netdev); 606 struct ixgbe_adapter *adapter = netdev_priv(netdev);
607 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
607 608
608 609
609 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 610 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
610 goto out_enable; 611 goto out_enable;
611 612
613 atomic_inc(&fcoe->refcnt);
612 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 614 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
613 goto out_enable; 615 goto out_enable;
614 616
@@ -648,6 +650,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
648{ 650{
649 int rc = -EINVAL; 651 int rc = -EINVAL;
650 struct ixgbe_adapter *adapter = netdev_priv(netdev); 652 struct ixgbe_adapter *adapter = netdev_priv(netdev);
653 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
651 654
652 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) 655 if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
653 goto out_disable; 656 goto out_disable;
@@ -655,6 +658,9 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
655 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 658 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
656 goto out_disable; 659 goto out_disable;
657 660
661 if (!atomic_dec_and_test(&fcoe->refcnt))
662 goto out_disable;
663
658 e_info(drv, "Disabling FCoE offload features.\n"); 664 e_info(drv, "Disabling FCoE offload features.\n");
659 netdev->features &= ~NETIF_F_FCOE_CRC; 665 netdev->features &= ~NETIF_F_FCOE_CRC;
660 netdev->features &= ~NETIF_F_FSO; 666 netdev->features &= ~NETIF_F_FSO;
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index abf4b2b3f252..4bc2c551c8db 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -66,6 +66,7 @@ struct ixgbe_fcoe {
66 u8 tc; 66 u8 tc;
67 u8 up; 67 u8 up;
68#endif 68#endif
69 atomic_t refcnt;
69 spinlock_t lock; 70 spinlock_t lock;
70 struct pci_pool *pool; 71 struct pci_pool *pool;
71 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d03eef96c0ba..f85631263af8 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -824,9 +824,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
824 824
825 tx_ring->total_bytes += total_bytes; 825 tx_ring->total_bytes += total_bytes;
826 tx_ring->total_packets += total_packets; 826 tx_ring->total_packets += total_packets;
827 u64_stats_update_begin(&tx_ring->syncp);
827 tx_ring->stats.packets += total_packets; 828 tx_ring->stats.packets += total_packets;
828 tx_ring->stats.bytes += total_bytes; 829 tx_ring->stats.bytes += total_bytes;
829 return (count < tx_ring->work_limit); 830 u64_stats_update_end(&tx_ring->syncp);
831 return count < tx_ring->work_limit;
830} 832}
831 833
832#ifdef CONFIG_IXGBE_DCA 834#ifdef CONFIG_IXGBE_DCA
@@ -954,17 +956,13 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
954 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 956 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
955 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 957 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
956 958
957 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 959 if (is_vlan && (tag & VLAN_VID_MASK))
958 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) 960 __vlan_hwaccel_put_tag(skb, tag);
959 vlan_gro_receive(napi, adapter->vlgrp, tag, skb); 961
960 else 962 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
961 napi_gro_receive(napi, skb); 963 napi_gro_receive(napi, skb);
962 } else { 964 else
963 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) 965 netif_rx(skb);
964 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
965 else
966 netif_rx(skb);
967 }
968} 966}
969 967
970/** 968/**
@@ -1172,7 +1170,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1172 int *work_done, int work_to_do) 1170 int *work_done, int work_to_do)
1173{ 1171{
1174 struct ixgbe_adapter *adapter = q_vector->adapter; 1172 struct ixgbe_adapter *adapter = q_vector->adapter;
1175 struct net_device *netdev = adapter->netdev;
1176 struct pci_dev *pdev = adapter->pdev; 1173 struct pci_dev *pdev = adapter->pdev;
1177 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 1174 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1178 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 1175 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -1298,8 +1295,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1298 rx_ring->rsc_count++; 1295 rx_ring->rsc_count++;
1299 rx_ring->rsc_flush++; 1296 rx_ring->rsc_flush++;
1300 } 1297 }
1298 u64_stats_update_begin(&rx_ring->syncp);
1301 rx_ring->stats.packets++; 1299 rx_ring->stats.packets++;
1302 rx_ring->stats.bytes += skb->len; 1300 rx_ring->stats.bytes += skb->len;
1301 u64_stats_update_end(&rx_ring->syncp);
1303 } else { 1302 } else {
1304 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 1303 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1305 rx_buffer_info->skb = next_buffer->skb; 1304 rx_buffer_info->skb = next_buffer->skb;
@@ -1375,8 +1374,6 @@ next_desc:
1375 1374
1376 rx_ring->total_packets += total_rx_packets; 1375 rx_ring->total_packets += total_rx_packets;
1377 rx_ring->total_bytes += total_rx_bytes; 1376 rx_ring->total_bytes += total_rx_bytes;
1378 netdev->stats.rx_bytes += total_rx_bytes;
1379 netdev->stats.rx_packets += total_rx_packets;
1380 1377
1381 return cleaned; 1378 return cleaned;
1382} 1379}
@@ -1433,6 +1430,21 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1433 q_vector->eitr = adapter->rx_eitr_param; 1430 q_vector->eitr = adapter->rx_eitr_param;
1434 1431
1435 ixgbe_write_eitr(q_vector); 1432 ixgbe_write_eitr(q_vector);
1433 /* If Flow Director is enabled, set interrupt affinity */
1434 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
1435 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
1436 /*
1437 * Allocate the affinity_hint cpumask, assign the mask
1438 * for this vector, and set our affinity_hint for
1439 * this irq.
1440 */
1441 if (!alloc_cpumask_var(&q_vector->affinity_mask,
1442 GFP_KERNEL))
1443 return;
1444 cpumask_set_cpu(v_idx, q_vector->affinity_mask);
1445 irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
1446 q_vector->affinity_mask);
1447 }
1436 } 1448 }
1437 1449
1438 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1450 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
@@ -2233,7 +2245,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2233 * ixgbe_irq_enable - Enable default interrupt generation settings 2245 * ixgbe_irq_enable - Enable default interrupt generation settings
2234 * @adapter: board private structure 2246 * @adapter: board private structure
2235 **/ 2247 **/
2236static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 2248static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2249 bool flush)
2237{ 2250{
2238 u32 mask; 2251 u32 mask;
2239 2252
@@ -2254,8 +2267,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
2254 mask |= IXGBE_EIMS_FLOW_DIR; 2267 mask |= IXGBE_EIMS_FLOW_DIR;
2255 2268
2256 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 2269 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
2257 ixgbe_irq_enable_queues(adapter, ~0); 2270 if (queues)
2258 IXGBE_WRITE_FLUSH(&adapter->hw); 2271 ixgbe_irq_enable_queues(adapter, ~0);
2272 if (flush)
2273 IXGBE_WRITE_FLUSH(&adapter->hw);
2259 2274
2260 if (adapter->num_vfs > 32) { 2275 if (adapter->num_vfs > 32) {
2261 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; 2276 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -2277,7 +2292,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2277 u32 eicr; 2292 u32 eicr;
2278 2293
2279 /* 2294 /*
2280 * Workaround for silicon errata. Mask the interrupts 2295 * Workaround for silicon errata on 82598. Mask the interrupts
2281 * before the read of EICR. 2296 * before the read of EICR.
2282 */ 2297 */
2283 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); 2298 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -2286,10 +2301,15 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2286 * therefore no explict interrupt disable is necessary */ 2301 * therefore no explict interrupt disable is necessary */
2287 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2302 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2288 if (!eicr) { 2303 if (!eicr) {
2289 /* shared interrupt alert! 2304 /*
2305 * shared interrupt alert!
2290 * make sure interrupts are enabled because the read will 2306 * make sure interrupts are enabled because the read will
2291 * have disabled interrupts due to EIAM */ 2307 * have disabled interrupts due to EIAM
2292 ixgbe_irq_enable(adapter); 2308 * finish the workaround of silicon errata on 82598. Unmask
2309 * the interrupt that we masked before the EICR read.
2310 */
2311 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2312 ixgbe_irq_enable(adapter, true, true);
2293 return IRQ_NONE; /* Not our interrupt */ 2313 return IRQ_NONE; /* Not our interrupt */
2294 } 2314 }
2295 2315
@@ -2313,6 +2333,14 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2313 __napi_schedule(&(q_vector->napi)); 2333 __napi_schedule(&(q_vector->napi));
2314 } 2334 }
2315 2335
2336 /*
2337 * re-enable link(maybe) and non-queue interrupts, no flush.
2338 * ixgbe_poll will re-enable the queue interrupts
2339 */
2340
2341 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2342 ixgbe_irq_enable(adapter, false, false);
2343
2316 return IRQ_HANDLED; 2344 return IRQ_HANDLED;
2317} 2345}
2318 2346
@@ -3034,6 +3062,7 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3034 3062
3035 /* add VID to filter table */ 3063 /* add VID to filter table */
3036 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); 3064 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
3065 set_bit(vid, adapter->active_vlans);
3037} 3066}
3038 3067
3039static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 3068static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -3042,16 +3071,9 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3042 struct ixgbe_hw *hw = &adapter->hw; 3071 struct ixgbe_hw *hw = &adapter->hw;
3043 int pool_ndx = adapter->num_vfs; 3072 int pool_ndx = adapter->num_vfs;
3044 3073
3045 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3046 ixgbe_irq_disable(adapter);
3047
3048 vlan_group_set_device(adapter->vlgrp, vid, NULL);
3049
3050 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3051 ixgbe_irq_enable(adapter);
3052
3053 /* remove VID from filter table */ 3074 /* remove VID from filter table */
3054 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); 3075 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
3076 clear_bit(vid, adapter->active_vlans);
3055} 3077}
3056 3078
3057/** 3079/**
@@ -3061,27 +3083,45 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3061static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) 3083static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3062{ 3084{
3063 struct ixgbe_hw *hw = &adapter->hw; 3085 struct ixgbe_hw *hw = &adapter->hw;
3064 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3086 u32 vlnctrl;
3087
3088 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3089 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3090 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3091}
3092
3093/**
3094 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3095 * @adapter: driver data
3096 */
3097static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3098{
3099 struct ixgbe_hw *hw = &adapter->hw;
3100 u32 vlnctrl;
3101
3102 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3103 vlnctrl |= IXGBE_VLNCTRL_VFE;
3104 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3105 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3106}
3107
3108/**
3109 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3110 * @adapter: driver data
3111 */
3112static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3113{
3114 struct ixgbe_hw *hw = &adapter->hw;
3115 u32 vlnctrl;
3065 int i, j; 3116 int i, j;
3066 3117
3067 switch (hw->mac.type) { 3118 switch (hw->mac.type) {
3068 case ixgbe_mac_82598EB: 3119 case ixgbe_mac_82598EB:
3069 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 3120 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3070#ifdef CONFIG_IXGBE_DCB 3121 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3071 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3072 vlnctrl &= ~IXGBE_VLNCTRL_VME;
3073#endif
3074 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3075 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3122 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3076 break; 3123 break;
3077 case ixgbe_mac_82599EB: 3124 case ixgbe_mac_82599EB:
3078 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
3079 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3080 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3081#ifdef CONFIG_IXGBE_DCB
3082 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
3083 break;
3084#endif
3085 for (i = 0; i < adapter->num_rx_queues; i++) { 3125 for (i = 0; i < adapter->num_rx_queues; i++) {
3086 j = adapter->rx_ring[i]->reg_idx; 3126 j = adapter->rx_ring[i]->reg_idx;
3087 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3127 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3095,25 +3135,22 @@ static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3095} 3135}
3096 3136
3097/** 3137/**
3098 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering 3138 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
3099 * @adapter: driver data 3139 * @adapter: driver data
3100 */ 3140 */
3101static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) 3141static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3102{ 3142{
3103 struct ixgbe_hw *hw = &adapter->hw; 3143 struct ixgbe_hw *hw = &adapter->hw;
3104 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3144 u32 vlnctrl;
3105 int i, j; 3145 int i, j;
3106 3146
3107 switch (hw->mac.type) { 3147 switch (hw->mac.type) {
3108 case ixgbe_mac_82598EB: 3148 case ixgbe_mac_82598EB:
3109 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 3149 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3110 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; 3150 vlnctrl |= IXGBE_VLNCTRL_VME;
3111 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3151 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3112 break; 3152 break;
3113 case ixgbe_mac_82599EB: 3153 case ixgbe_mac_82599EB:
3114 vlnctrl |= IXGBE_VLNCTRL_VFE;
3115 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3116 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3117 for (i = 0; i < adapter->num_rx_queues; i++) { 3154 for (i = 0; i < adapter->num_rx_queues; i++) {
3118 j = adapter->rx_ring[i]->reg_idx; 3155 j = adapter->rx_ring[i]->reg_idx;
3119 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3156 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3126,40 +3163,14 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3126 } 3163 }
3127} 3164}
3128 3165
3129static void ixgbe_vlan_rx_register(struct net_device *netdev,
3130 struct vlan_group *grp)
3131{
3132 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3133
3134 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3135 ixgbe_irq_disable(adapter);
3136 adapter->vlgrp = grp;
3137
3138 /*
3139 * For a DCB driver, always enable VLAN tag stripping so we can
3140 * still receive traffic from a DCB-enabled host even if we're
3141 * not in DCB mode.
3142 */
3143 ixgbe_vlan_filter_enable(adapter);
3144
3145 ixgbe_vlan_rx_add_vid(netdev, 0);
3146
3147 if (!test_bit(__IXGBE_DOWN, &adapter->state))
3148 ixgbe_irq_enable(adapter);
3149}
3150
3151static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 3166static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3152{ 3167{
3153 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); 3168 u16 vid;
3154 3169
3155 if (adapter->vlgrp) { 3170 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3156 u16 vid; 3171
3157 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 3172 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3158 if (!vlan_group_get_device(adapter->vlgrp, vid)) 3173 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
3159 continue;
3160 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
3161 }
3162 }
3163} 3174}
3164 3175
3165/** 3176/**
@@ -3274,6 +3285,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3274 } 3285 }
3275 3286
3276 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 3287 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3288
3289 if (netdev->features & NETIF_F_HW_VLAN_RX)
3290 ixgbe_vlan_strip_enable(adapter);
3291 else
3292 ixgbe_vlan_strip_disable(adapter);
3277} 3293}
3278 3294
3279static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 3295static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -3343,7 +3359,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3343 if (hw->mac.type == ixgbe_mac_82598EB) 3359 if (hw->mac.type == ixgbe_mac_82598EB)
3344 netif_set_gso_max_size(adapter->netdev, 32768); 3360 netif_set_gso_max_size(adapter->netdev, 32768);
3345 3361
3346 ixgbe_dcb_check_config(&adapter->dcb_cfg);
3347 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); 3362 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
3348 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); 3363 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
3349 3364
@@ -3358,7 +3373,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3358 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3373 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3359 } 3374 }
3360 /* Enable VLAN tag insert/strip */ 3375 /* Enable VLAN tag insert/strip */
3361 ixgbe_vlan_filter_enable(adapter); 3376 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3362 3377
3363 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3378 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3364} 3379}
@@ -3370,13 +3385,13 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
3370 struct ixgbe_hw *hw = &adapter->hw; 3385 struct ixgbe_hw *hw = &adapter->hw;
3371 int i; 3386 int i;
3372 3387
3373 ixgbe_set_rx_mode(netdev);
3374
3375 ixgbe_restore_vlan(adapter);
3376#ifdef CONFIG_IXGBE_DCB 3388#ifdef CONFIG_IXGBE_DCB
3377 ixgbe_configure_dcb(adapter); 3389 ixgbe_configure_dcb(adapter);
3378#endif 3390#endif
3379 3391
3392 ixgbe_set_rx_mode(netdev);
3393 ixgbe_restore_vlan(adapter);
3394
3380#ifdef IXGBE_FCOE 3395#ifdef IXGBE_FCOE
3381 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 3396 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3382 ixgbe_configure_fcoe(adapter); 3397 ixgbe_configure_fcoe(adapter);
@@ -3546,7 +3561,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3546 3561
3547 /* clear any pending interrupts, may auto mask */ 3562 /* clear any pending interrupts, may auto mask */
3548 IXGBE_READ_REG(hw, IXGBE_EICR); 3563 IXGBE_READ_REG(hw, IXGBE_EICR);
3549 ixgbe_irq_enable(adapter); 3564 ixgbe_irq_enable(adapter, true, true);
3550 3565
3551 /* 3566 /*
3552 * If this adapter has a fan, check to see if we had a failure 3567 * If this adapter has a fan, check to see if we had a failure
@@ -3800,6 +3815,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3800 u32 rxctrl; 3815 u32 rxctrl;
3801 u32 txdctl; 3816 u32 txdctl;
3802 int i, j; 3817 int i, j;
3818 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3803 3819
3804 /* signal that we are down to the interrupt handler */ 3820 /* signal that we are down to the interrupt handler */
3805 set_bit(__IXGBE_DOWN, &adapter->state); 3821 set_bit(__IXGBE_DOWN, &adapter->state);
@@ -3838,6 +3854,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3838 3854
3839 ixgbe_napi_disable_all(adapter); 3855 ixgbe_napi_disable_all(adapter);
3840 3856
3857 /* Cleanup the affinity_hint CPU mask memory and callback */
3858 for (i = 0; i < num_q_vectors; i++) {
3859 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
3860 /* clear the affinity_mask in the IRQ descriptor */
3861 irq_set_affinity_hint(adapter->msix_entries[i]. vector, NULL);
3862 /* release the CPU mask memory */
3863 free_cpumask_var(q_vector->affinity_mask);
3864 }
3865
3841 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3866 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3842 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3867 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3843 cancel_work_sync(&adapter->fdir_reinit_task); 3868 cancel_work_sync(&adapter->fdir_reinit_task);
@@ -4088,7 +4113,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4088 * fallthrough conditions. 4113 * fallthrough conditions.
4089 * 4114 *
4090 **/ 4115 **/
4091static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 4116static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4092{ 4117{
4093 /* Start with base case */ 4118 /* Start with base case */
4094 adapter->num_rx_queues = 1; 4119 adapter->num_rx_queues = 1;
@@ -4097,7 +4122,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4097 adapter->num_rx_queues_per_pool = 1; 4122 adapter->num_rx_queues_per_pool = 1;
4098 4123
4099 if (ixgbe_set_sriov_queues(adapter)) 4124 if (ixgbe_set_sriov_queues(adapter))
4100 return; 4125 goto done;
4101 4126
4102#ifdef IXGBE_FCOE 4127#ifdef IXGBE_FCOE
4103 if (ixgbe_set_fcoe_queues(adapter)) 4128 if (ixgbe_set_fcoe_queues(adapter))
@@ -4120,8 +4145,10 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
4120 adapter->num_tx_queues = 1; 4145 adapter->num_tx_queues = 1;
4121 4146
4122done: 4147done:
4123 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 4148 /* Notify the stack of the (possibly) reduced queue counts. */
4124 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 4149 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4150 return netif_set_real_num_rx_queues(adapter->netdev,
4151 adapter->num_rx_queues);
4125} 4152}
4126 4153
4127static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 4154static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -4550,7 +4577,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4550 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 4577 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4551 ixgbe_disable_sriov(adapter); 4578 ixgbe_disable_sriov(adapter);
4552 4579
4553 ixgbe_set_num_queues(adapter); 4580 err = ixgbe_set_num_queues(adapter);
4581 if (err)
4582 return err;
4554 4583
4555 err = pci_enable_msi(adapter->pdev); 4584 err = pci_enable_msi(adapter->pdev);
4556 if (!err) { 4585 if (!err) {
@@ -4675,7 +4704,9 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
4675 int err; 4704 int err;
4676 4705
4677 /* Number of supported queues */ 4706 /* Number of supported queues */
4678 ixgbe_set_num_queues(adapter); 4707 err = ixgbe_set_num_queues(adapter);
4708 if (err)
4709 return err;
4679 4710
4680 err = ixgbe_set_interrupt_capability(adapter); 4711 err = ixgbe_set_interrupt_capability(adapter);
4681 if (err) { 4712 if (err) {
@@ -6265,7 +6296,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6265 int count = 0; 6296 int count = 0;
6266 unsigned int f; 6297 unsigned int f;
6267 6298
6268 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 6299 if (vlan_tx_tag_present(skb)) {
6269 tx_flags |= vlan_tx_tag_get(skb); 6300 tx_flags |= vlan_tx_tag_get(skb);
6270 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6301 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6271 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; 6302 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
@@ -6512,6 +6543,38 @@ static void ixgbe_netpoll(struct net_device *netdev)
6512} 6543}
6513#endif 6544#endif
6514 6545
6546static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6547 struct rtnl_link_stats64 *stats)
6548{
6549 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6550 int i;
6551
6552 /* accurate rx/tx bytes/packets stats */
6553 dev_txq_stats_fold(netdev, stats);
6554 for (i = 0; i < adapter->num_rx_queues; i++) {
6555 struct ixgbe_ring *ring = adapter->rx_ring[i];
6556 u64 bytes, packets;
6557 unsigned int start;
6558
6559 do {
6560 start = u64_stats_fetch_begin_bh(&ring->syncp);
6561 packets = ring->stats.packets;
6562 bytes = ring->stats.bytes;
6563 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6564 stats->rx_packets += packets;
6565 stats->rx_bytes += bytes;
6566 }
6567
6568 /* following stats updated by ixgbe_watchdog_task() */
6569 stats->multicast = netdev->stats.multicast;
6570 stats->rx_errors = netdev->stats.rx_errors;
6571 stats->rx_length_errors = netdev->stats.rx_length_errors;
6572 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
6573 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
6574 return stats;
6575}
6576
6577
6515static const struct net_device_ops ixgbe_netdev_ops = { 6578static const struct net_device_ops ixgbe_netdev_ops = {
6516 .ndo_open = ixgbe_open, 6579 .ndo_open = ixgbe_open,
6517 .ndo_stop = ixgbe_close, 6580 .ndo_stop = ixgbe_close,
@@ -6523,7 +6586,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6523 .ndo_set_mac_address = ixgbe_set_mac, 6586 .ndo_set_mac_address = ixgbe_set_mac,
6524 .ndo_change_mtu = ixgbe_change_mtu, 6587 .ndo_change_mtu = ixgbe_change_mtu,
6525 .ndo_tx_timeout = ixgbe_tx_timeout, 6588 .ndo_tx_timeout = ixgbe_tx_timeout,
6526 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
6527 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 6589 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
6528 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 6590 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
6529 .ndo_do_ioctl = ixgbe_ioctl, 6591 .ndo_do_ioctl = ixgbe_ioctl,
@@ -6531,6 +6593,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6531 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, 6593 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6532 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, 6594 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6533 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 6595 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
6596 .ndo_get_stats64 = ixgbe_get_stats64,
6534#ifdef CONFIG_NET_POLL_CONTROLLER 6597#ifdef CONFIG_NET_POLL_CONTROLLER
6535 .ndo_poll_controller = ixgbe_netpoll, 6598 .ndo_poll_controller = ixgbe_netpoll,
6536#endif 6599#endif
@@ -6842,8 +6905,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6842 netdev->vlan_features |= NETIF_F_FCOE_MTU; 6905 netdev->vlan_features |= NETIF_F_FCOE_MTU;
6843 } 6906 }
6844#endif /* IXGBE_FCOE */ 6907#endif /* IXGBE_FCOE */
6845 if (pci_using_dac) 6908 if (pci_using_dac) {
6846 netdev->features |= NETIF_F_HIGHDMA; 6909 netdev->features |= NETIF_F_HIGHDMA;
6910 netdev->vlan_features |= NETIF_F_HIGHDMA;
6911 }
6847 6912
6848 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 6913 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
6849 netdev->features |= NETIF_F_LRO; 6914 netdev->features |= NETIF_F_LRO;
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index d75f9148eb1f..471f0f2cdb98 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -200,7 +200,8 @@ out:
200 * returns SUCCESS if it successfully received a message notification and 200 * returns SUCCESS if it successfully received a message notification and
201 * copied it into the receive buffer. 201 * copied it into the receive buffer.
202 **/ 202 **/
203s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) 203static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
204 u16 mbx_id)
204{ 205{
205 struct ixgbe_mbx_info *mbx = &hw->mbx; 206 struct ixgbe_mbx_info *mbx = &hw->mbx;
206 s32 ret_val = IXGBE_ERR_MBX; 207 s32 ret_val = IXGBE_ERR_MBX;
@@ -227,7 +228,7 @@ out:
227 * returns SUCCESS if it successfully copied message into the buffer and 228 * returns SUCCESS if it successfully copied message into the buffer and
228 * received an ack to that message within delay * timeout period 229 * received an ack to that message within delay * timeout period
229 **/ 230 **/
230s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, 231static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
231 u16 mbx_id) 232 u16 mbx_id)
232{ 233{
233 struct ixgbe_mbx_info *mbx = &hw->mbx; 234 struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -247,20 +248,6 @@ out:
247 return ret_val; 248 return ret_val;
248} 249}
249 250
250/**
251 * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
252 * @hw: pointer to the HW structure
253 *
254 * Setup the mailbox read and write message function pointers
255 **/
256void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
257{
258 struct ixgbe_mbx_info *mbx = &hw->mbx;
259
260 mbx->ops.read_posted = ixgbe_read_posted_mbx;
261 mbx->ops.write_posted = ixgbe_write_posted_mbx;
262}
263
264static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) 251static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
265{ 252{
266 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); 253 u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index be7ab3309ab7..7e0d08ff5b53 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -83,12 +83,9 @@
83 83
84s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); 84s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
85s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); 85s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
86s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
87s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
88s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); 86s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
89s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); 87s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
90s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
91void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
92void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
93 90
94extern struct ixgbe_mbx_operations mbx_ops_82599; 91extern struct ixgbe_mbx_operations mbx_ops_82599;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 49661a138e22..5428153af8f3 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -43,8 +43,8 @@
43 43
44#include "ixgbe_sriov.h" 44#include "ixgbe_sriov.h"
45 45
46int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, 46static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
47 int entries, u16 *hash_list, u32 vf) 47 int entries, u16 *hash_list, u32 vf)
48{ 48{
49 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 49 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
50 struct ixgbe_hw *hw = &adapter->hw; 50 struct ixgbe_hw *hw = &adapter->hw;
@@ -104,13 +104,14 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
104 } 104 }
105} 105}
106 106
107int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) 107static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
108 u32 vf)
108{ 109{
109 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
110} 111}
111 112
112 113
113void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 114static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
114{ 115{
115 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
116 vmolr |= (IXGBE_VMOLR_ROMPE | 117 vmolr |= (IXGBE_VMOLR_ROMPE |
@@ -134,7 +135,7 @@ static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
134 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); 135 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
135} 136}
136 137
137inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 138static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
138{ 139{
139 struct ixgbe_hw *hw = &adapter->hw; 140 struct ixgbe_hw *hw = &adapter->hw;
140 int rar_entry = hw->mac.num_rar_entries - (vf + 1); 141 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -162,8 +163,8 @@ inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
162 hw->mac.ops.clear_rar(hw, rar_entry); 163 hw->mac.ops.clear_rar(hw, rar_entry);
163} 164}
164 165
165int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 166static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
166 int vf, unsigned char *mac_addr) 167 int vf, unsigned char *mac_addr)
167{ 168{
168 struct ixgbe_hw *hw = &adapter->hw; 169 struct ixgbe_hw *hw = &adapter->hw;
169 int rar_entry = hw->mac.num_rar_entries - (vf + 1); 170 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -197,7 +198,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
197 return 0; 198 return 0;
198} 199}
199 200
200inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) 201static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
201{ 202{
202 struct ixgbe_hw *hw = &adapter->hw; 203 struct ixgbe_hw *hw = &adapter->hw;
203 u32 reg; 204 u32 reg;
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 184730ecdfb6..49dc14debef7 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -28,16 +28,8 @@
28#ifndef _IXGBE_SRIOV_H_ 28#ifndef _IXGBE_SRIOV_H_
29#define _IXGBE_SRIOV_H_ 29#define _IXGBE_SRIOV_H_
30 30
31int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 31void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 32void ixgbe_msg_task(struct ixgbe_adapter *adapter);
39int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
40 int vf, unsigned char *mac_addr);
41int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); 33int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 34void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); 35void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4680b069b84f..4cc817acfb62 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -330,10 +330,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
330{ 330{
331 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 331 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
332 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; 332 struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
333 int i, err; 333 int i, err = 0;
334 u32 new_rx_count, new_tx_count; 334 u32 new_rx_count, new_tx_count;
335 bool need_tx_update = false;
336 bool need_rx_update = false;
337 335
338 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 336 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
339 return -EINVAL; 337 return -EINVAL;
@@ -355,89 +353,96 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
355 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 353 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
356 msleep(1); 354 msleep(1);
357 355
358 if (new_tx_count != adapter->tx_ring_count) { 356 /*
359 tx_ring = kcalloc(adapter->num_tx_queues, 357 * If the adapter isn't up and running then just set the
360 sizeof(struct ixgbevf_ring), GFP_KERNEL); 358 * new parameters and scurry for the exits.
361 if (!tx_ring) { 359 */
362 err = -ENOMEM; 360 if (!netif_running(adapter->netdev)) {
363 goto err_setup; 361 for (i = 0; i < adapter->num_tx_queues; i++)
364 } 362 adapter->tx_ring[i].count = new_tx_count;
365 memcpy(tx_ring, adapter->tx_ring, 363 for (i = 0; i < adapter->num_rx_queues; i++)
366 adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); 364 adapter->rx_ring[i].count = new_rx_count;
367 for (i = 0; i < adapter->num_tx_queues; i++) { 365 adapter->tx_ring_count = new_tx_count;
368 tx_ring[i].count = new_tx_count; 366 adapter->rx_ring_count = new_rx_count;
369 err = ixgbevf_setup_tx_resources(adapter, 367 goto clear_reset;
370 &tx_ring[i]);
371 if (err) {
372 while (i) {
373 i--;
374 ixgbevf_free_tx_resources(adapter,
375 &tx_ring[i]);
376 }
377 kfree(tx_ring);
378 goto err_setup;
379 }
380 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
381 }
382 need_tx_update = true;
383 } 368 }
384 369
385 if (new_rx_count != adapter->rx_ring_count) { 370 tx_ring = kcalloc(adapter->num_tx_queues,
386 rx_ring = kcalloc(adapter->num_rx_queues, 371 sizeof(struct ixgbevf_ring), GFP_KERNEL);
387 sizeof(struct ixgbevf_ring), GFP_KERNEL); 372 if (!tx_ring) {
388 if ((!rx_ring) && (need_tx_update)) { 373 err = -ENOMEM;
389 err = -ENOMEM; 374 goto clear_reset;
390 goto err_rx_setup;
391 }
392 memcpy(rx_ring, adapter->rx_ring,
393 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
394 for (i = 0; i < adapter->num_rx_queues; i++) {
395 rx_ring[i].count = new_rx_count;
396 err = ixgbevf_setup_rx_resources(adapter,
397 &rx_ring[i]);
398 if (err) {
399 while (i) {
400 i--;
401 ixgbevf_free_rx_resources(adapter,
402 &rx_ring[i]);
403 }
404 kfree(rx_ring);
405 goto err_rx_setup;
406 }
407 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
408 }
409 need_rx_update = true;
410 } 375 }
411 376
412err_rx_setup: 377 rx_ring = kcalloc(adapter->num_rx_queues,
413 /* if rings need to be updated, here's the place to do it in one shot */ 378 sizeof(struct ixgbevf_ring), GFP_KERNEL);
414 if (need_tx_update || need_rx_update) { 379 if (!rx_ring) {
415 if (netif_running(netdev)) 380 err = -ENOMEM;
416 ixgbevf_down(adapter); 381 goto err_rx_setup;
417 } 382 }
418 383
419 /* tx */ 384 ixgbevf_down(adapter);
420 if (need_tx_update) { 385
421 kfree(adapter->tx_ring); 386 memcpy(tx_ring, adapter->tx_ring,
422 adapter->tx_ring = tx_ring; 387 adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
423 tx_ring = NULL; 388 for (i = 0; i < adapter->num_tx_queues; i++) {
424 adapter->tx_ring_count = new_tx_count; 389 tx_ring[i].count = new_tx_count;
390 err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
391 if (err) {
392 while (i) {
393 i--;
394 ixgbevf_free_tx_resources(adapter,
395 &tx_ring[i]);
396 }
397 goto err_tx_ring_setup;
398 }
399 tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
425 } 400 }
426 401
427 /* rx */ 402 memcpy(rx_ring, adapter->rx_ring,
428 if (need_rx_update) { 403 adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
429 kfree(adapter->rx_ring); 404 for (i = 0; i < adapter->num_rx_queues; i++) {
430 adapter->rx_ring = rx_ring; 405 rx_ring[i].count = new_rx_count;
431 rx_ring = NULL; 406 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
432 adapter->rx_ring_count = new_rx_count; 407 if (err) {
408 while (i) {
409 i--;
410 ixgbevf_free_rx_resources(adapter,
411 &rx_ring[i]);
412 }
413 goto err_rx_ring_setup;
414 }
415 rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
433 } 416 }
434 417
418 /*
419 * Only switch to new rings if all the prior allocations
420 * and ring setups have succeeded.
421 */
422 kfree(adapter->tx_ring);
423 adapter->tx_ring = tx_ring;
424 adapter->tx_ring_count = new_tx_count;
425
426 kfree(adapter->rx_ring);
427 adapter->rx_ring = rx_ring;
428 adapter->rx_ring_count = new_rx_count;
429
435 /* success! */ 430 /* success! */
436 err = 0; 431 ixgbevf_up(adapter);
437 if (netif_running(netdev)) 432
438 ixgbevf_up(adapter); 433 goto clear_reset;
434
435err_rx_ring_setup:
436 for(i = 0; i < adapter->num_tx_queues; i++)
437 ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
438
439err_tx_ring_setup:
440 kfree(rx_ring);
441
442err_rx_setup:
443 kfree(tx_ring);
439 444
440err_setup: 445clear_reset:
441 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 446 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
442 return err; 447 return err;
443} 448}
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 3eda1bdbbb7a..dc03c9652389 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -311,7 +311,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
311 netdev->stats.tx_bytes += total_bytes; 311 netdev->stats.tx_bytes += total_bytes;
312 netdev->stats.tx_packets += total_packets; 312 netdev->stats.tx_packets += total_packets;
313 313
314 return (count < tx_ring->work_limit); 314 return count < tx_ring->work_limit;
315} 315}
316 316
317/** 317/**
@@ -1495,7 +1495,7 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1495 1495
1496 if (adapter->vlgrp) { 1496 if (adapter->vlgrp) {
1497 u16 vid; 1497 u16 vid;
1498 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1498 for (vid = 0; vid < VLAN_N_VID; vid++) {
1499 if (!vlan_group_get_device(adapter->vlgrp, vid)) 1499 if (!vlan_group_get_device(adapter->vlgrp, vid))
1500 continue; 1500 continue;
1501 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1501 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
@@ -3134,7 +3134,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3134 3134
3135 tx_ring = &adapter->tx_ring[r_idx]; 3135 tx_ring = &adapter->tx_ring[r_idx];
3136 3136
3137 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3137 if (vlan_tx_tag_present(skb)) {
3138 tx_flags |= vlan_tx_tag_get(skb); 3138 tx_flags |= vlan_tx_tag_get(skb);
3139 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3139 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3140 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3140 tx_flags |= IXGBE_TX_FLAGS_VLAN;
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index b8143501e6fc..84ac486f4a65 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -308,7 +308,7 @@ out_no_read:
308 * 308 *
309 * Initializes the hw->mbx struct to correct values for vf mailbox 309 * Initializes the hw->mbx struct to correct values for vf mailbox
310 */ 310 */
311s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) 311static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
312{ 312{
313 struct ixgbe_mbx_info *mbx = &hw->mbx; 313 struct ixgbe_mbx_info *mbx = &hw->mbx;
314 314
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 1b0e0bf4c0f5..8c063bebee7f 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -95,6 +95,4 @@
95/* forward declaration of the HW struct */ 95/* forward declaration of the HW struct */
96struct ixgbe_hw; 96struct ixgbe_hw;
97 97
98s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
99
100#endif /* _IXGBE_MBX_H_ */ 98#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index f6f929958ba0..bfe42c1fcfaf 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -368,7 +368,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
368 return 0; 368 return 0;
369} 369}
370 370
371struct ixgbe_mac_operations ixgbevf_mac_ops = { 371static struct ixgbe_mac_operations ixgbevf_mac_ops = {
372 .init_hw = ixgbevf_init_hw_vf, 372 .init_hw = ixgbevf_init_hw_vf,
373 .reset_hw = ixgbevf_reset_hw_vf, 373 .reset_hw = ixgbevf_reset_hw_vf,
374 .start_hw = ixgbevf_start_hw_vf, 374 .start_hw = ixgbevf_start_hw_vf,
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index c04c096bc6a9..d7a975ee2add 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2008 JMicron Technology Corporation 4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/ 5 * http://www.jmicron.com/
6 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
6 * 7 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> 8 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 * 9 *
@@ -989,6 +990,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
989 goto out; 990 goto out;
990 --limit; 991 --limit;
991 992
993 rmb();
992 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT; 994 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
993 995
994 if (unlikely(desccnt > 1 || 996 if (unlikely(desccnt > 1 ||
@@ -1574,6 +1576,16 @@ jme_free_irq(struct jme_adapter *jme)
1574 } 1576 }
1575} 1577}
1576 1578
1579static inline void
1580jme_phy_on(struct jme_adapter *jme)
1581{
1582 u32 bmcr;
1583
1584 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1585 bmcr &= ~BMCR_PDOWN;
1586 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1587}
1588
1577static int 1589static int
1578jme_open(struct net_device *netdev) 1590jme_open(struct net_device *netdev)
1579{ 1591{
@@ -1594,10 +1606,12 @@ jme_open(struct net_device *netdev)
1594 1606
1595 jme_start_irq(jme); 1607 jme_start_irq(jme);
1596 1608
1597 if (test_bit(JME_FLAG_SSET, &jme->flags)) 1609 if (test_bit(JME_FLAG_SSET, &jme->flags)) {
1610 jme_phy_on(jme);
1598 jme_set_settings(netdev, &jme->old_ecmd); 1611 jme_set_settings(netdev, &jme->old_ecmd);
1599 else 1612 } else {
1600 jme_reset_phy_processor(jme); 1613 jme_reset_phy_processor(jme);
1614 }
1601 1615
1602 jme_reset_link(jme); 1616 jme_reset_link(jme);
1603 1617
@@ -2382,6 +2396,10 @@ jme_set_settings(struct net_device *netdev,
2382 if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE) 2396 if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
2383 return -EINVAL; 2397 return -EINVAL;
2384 2398
2399 /*
2400 * Check If user changed duplex only while force_media.
2401 * Hardware would not generate link change interrupt.
2402 */
2385 if (jme->mii_if.force_media && 2403 if (jme->mii_if.force_media &&
2386 ecmd->autoneg != AUTONEG_ENABLE && 2404 ecmd->autoneg != AUTONEG_ENABLE &&
2387 (jme->mii_if.full_duplex != ecmd->duplex)) 2405 (jme->mii_if.full_duplex != ecmd->duplex))
@@ -2391,12 +2409,40 @@ jme_set_settings(struct net_device *netdev,
2391 rc = mii_ethtool_sset(&(jme->mii_if), ecmd); 2409 rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
2392 spin_unlock_bh(&jme->phy_lock); 2410 spin_unlock_bh(&jme->phy_lock);
2393 2411
2394 if (!rc && fdc)
2395 jme_reset_link(jme);
2396
2397 if (!rc) { 2412 if (!rc) {
2398 set_bit(JME_FLAG_SSET, &jme->flags); 2413 if (fdc)
2414 jme_reset_link(jme);
2399 jme->old_ecmd = *ecmd; 2415 jme->old_ecmd = *ecmd;
2416 set_bit(JME_FLAG_SSET, &jme->flags);
2417 }
2418
2419 return rc;
2420}
2421
2422static int
2423jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
2424{
2425 int rc;
2426 struct jme_adapter *jme = netdev_priv(netdev);
2427 struct mii_ioctl_data *mii_data = if_mii(rq);
2428 unsigned int duplex_chg;
2429
2430 if (cmd == SIOCSMIIREG) {
2431 u16 val = mii_data->val_in;
2432 if (!(val & (BMCR_RESET|BMCR_ANENABLE)) &&
2433 (val & BMCR_SPEED1000))
2434 return -EINVAL;
2435 }
2436
2437 spin_lock_bh(&jme->phy_lock);
2438 rc = generic_mii_ioctl(&jme->mii_if, mii_data, cmd, &duplex_chg);
2439 spin_unlock_bh(&jme->phy_lock);
2440
2441 if (!rc && (cmd == SIOCSMIIREG)) {
2442 if (duplex_chg)
2443 jme_reset_link(jme);
2444 jme_get_settings(netdev, &jme->old_ecmd);
2445 set_bit(JME_FLAG_SSET, &jme->flags);
2400 } 2446 }
2401 2447
2402 return rc; 2448 return rc;
@@ -2676,6 +2722,7 @@ static const struct net_device_ops jme_netdev_ops = {
2676 .ndo_open = jme_open, 2722 .ndo_open = jme_open,
2677 .ndo_stop = jme_close, 2723 .ndo_stop = jme_close,
2678 .ndo_validate_addr = eth_validate_addr, 2724 .ndo_validate_addr = eth_validate_addr,
2725 .ndo_do_ioctl = jme_ioctl,
2679 .ndo_start_xmit = jme_start_xmit, 2726 .ndo_start_xmit = jme_start_xmit,
2680 .ndo_set_mac_address = jme_set_macaddr, 2727 .ndo_set_mac_address = jme_set_macaddr,
2681 .ndo_set_multicast_list = jme_set_multi, 2728 .ndo_set_multicast_list = jme_set_multi,
@@ -2867,6 +2914,8 @@ jme_init_one(struct pci_dev *pdev,
2867 jme->mii_if.supports_gmii = true; 2914 jme->mii_if.supports_gmii = true;
2868 else 2915 else
2869 jme->mii_if.supports_gmii = false; 2916 jme->mii_if.supports_gmii = false;
2917 jme->mii_if.phy_id_mask = 0x1F;
2918 jme->mii_if.reg_num_mask = 0x1F;
2870 jme->mii_if.mdio_read = jme_mdio_read; 2919 jme->mii_if.mdio_read = jme_mdio_read;
2871 jme->mii_if.mdio_write = jme_mdio_write; 2920 jme->mii_if.mdio_write = jme_mdio_write;
2872 2921
@@ -3005,10 +3054,12 @@ jme_resume(struct pci_dev *pdev)
3005 jme_clear_pm(jme); 3054 jme_clear_pm(jme);
3006 pci_restore_state(pdev); 3055 pci_restore_state(pdev);
3007 3056
3008 if (test_bit(JME_FLAG_SSET, &jme->flags)) 3057 if (test_bit(JME_FLAG_SSET, &jme->flags)) {
3058 jme_phy_on(jme);
3009 jme_set_settings(netdev, &jme->old_ecmd); 3059 jme_set_settings(netdev, &jme->old_ecmd);
3010 else 3060 } else {
3011 jme_reset_phy_processor(jme); 3061 jme_reset_phy_processor(jme);
3062 }
3012 3063
3013 jme_start_irq(jme); 3064 jme_start_irq(jme);
3014 netif_device_attach(netdev); 3065 netif_device_attach(netdev);
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 1360f68861b8..eac09264bf2a 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright 2008 JMicron Technology Corporation 4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/ 5 * http://www.jmicron.com/
6 * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
6 * 7 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org> 8 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 * 9 *
@@ -25,7 +26,7 @@
25#define __JME_H_INCLUDED__ 26#define __JME_H_INCLUDED__
26 27
27#define DRV_NAME "jme" 28#define DRV_NAME "jme"
28#define DRV_VERSION "1.0.6" 29#define DRV_VERSION "1.0.7"
29#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
30 31
31#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 32#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 874ee01e8d9d..9f8e7027b0b3 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -38,6 +38,7 @@
38#include <linux/of_device.h> 38#include <linux/of_device.h>
39#include <linux/of_mdio.h> 39#include <linux/of_mdio.h>
40#include <linux/of_platform.h> 40#include <linux/of_platform.h>
41#include <linux/of_address.h>
41#include <linux/skbuff.h> 42#include <linux/skbuff.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/tcp.h> /* needed for sizeof(tcphdr) */ 44#include <linux/tcp.h> /* needed for sizeof(tcphdr) */
@@ -494,7 +495,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
494 lp->options |= options; 495 lp->options |= options;
495 mutex_unlock(&lp->indirect_mutex); 496 mutex_unlock(&lp->indirect_mutex);
496 497
497 return (0); 498 return 0;
498} 499}
499 500
500/* Initialize temac */ 501/* Initialize temac */
diff --git a/drivers/net/ll_temac_mdio.c b/drivers/net/ll_temac_mdio.c
index 5ae28c975b38..8cf9d4f56bb2 100644
--- a/drivers/net/ll_temac_mdio.c
+++ b/drivers/net/ll_temac_mdio.c
@@ -10,6 +10,7 @@
10#include <linux/phy.h> 10#include <linux/phy.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/of_device.h> 12#include <linux/of_device.h>
13#include <linux/of_address.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/of_mdio.h> 15#include <linux/of_mdio.h>
15 16
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 9a0996795321..2d9663a1c54d 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -64,7 +64,6 @@ struct pcpu_lstats {
64 u64 packets; 64 u64 packets;
65 u64 bytes; 65 u64 bytes;
66 struct u64_stats_sync syncp; 66 struct u64_stats_sync syncp;
67 unsigned long drops;
68}; 67};
69 68
70/* 69/*
@@ -74,7 +73,6 @@ struct pcpu_lstats {
74static netdev_tx_t loopback_xmit(struct sk_buff *skb, 73static netdev_tx_t loopback_xmit(struct sk_buff *skb,
75 struct net_device *dev) 74 struct net_device *dev)
76{ 75{
77 struct pcpu_lstats __percpu *pcpu_lstats;
78 struct pcpu_lstats *lb_stats; 76 struct pcpu_lstats *lb_stats;
79 int len; 77 int len;
80 78
@@ -83,8 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
83 skb->protocol = eth_type_trans(skb, dev); 81 skb->protocol = eth_type_trans(skb, dev);
84 82
85 /* it's OK to use per_cpu_ptr() because BHs are off */ 83 /* it's OK to use per_cpu_ptr() because BHs are off */
86 pcpu_lstats = (void __percpu __force *)dev->ml_priv; 84 lb_stats = this_cpu_ptr(dev->lstats);
87 lb_stats = this_cpu_ptr(pcpu_lstats);
88 85
89 len = skb->len; 86 len = skb->len;
90 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { 87 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
@@ -92,8 +89,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
92 lb_stats->bytes += len; 89 lb_stats->bytes += len;
93 lb_stats->packets++; 90 lb_stats->packets++;
94 u64_stats_update_end(&lb_stats->syncp); 91 u64_stats_update_end(&lb_stats->syncp);
95 } else 92 }
96 lb_stats->drops++;
97 93
98 return NETDEV_TX_OK; 94 return NETDEV_TX_OK;
99} 95}
@@ -101,32 +97,26 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
101static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, 97static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
102 struct rtnl_link_stats64 *stats) 98 struct rtnl_link_stats64 *stats)
103{ 99{
104 const struct pcpu_lstats __percpu *pcpu_lstats;
105 u64 bytes = 0; 100 u64 bytes = 0;
106 u64 packets = 0; 101 u64 packets = 0;
107 u64 drops = 0;
108 int i; 102 int i;
109 103
110 pcpu_lstats = (void __percpu __force *)dev->ml_priv;
111 for_each_possible_cpu(i) { 104 for_each_possible_cpu(i) {
112 const struct pcpu_lstats *lb_stats; 105 const struct pcpu_lstats *lb_stats;
113 u64 tbytes, tpackets; 106 u64 tbytes, tpackets;
114 unsigned int start; 107 unsigned int start;
115 108
116 lb_stats = per_cpu_ptr(pcpu_lstats, i); 109 lb_stats = per_cpu_ptr(dev->lstats, i);
117 do { 110 do {
118 start = u64_stats_fetch_begin(&lb_stats->syncp); 111 start = u64_stats_fetch_begin(&lb_stats->syncp);
119 tbytes = lb_stats->bytes; 112 tbytes = lb_stats->bytes;
120 tpackets = lb_stats->packets; 113 tpackets = lb_stats->packets;
121 } while (u64_stats_fetch_retry(&lb_stats->syncp, start)); 114 } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
122 drops += lb_stats->drops;
123 bytes += tbytes; 115 bytes += tbytes;
124 packets += tpackets; 116 packets += tpackets;
125 } 117 }
126 stats->rx_packets = packets; 118 stats->rx_packets = packets;
127 stats->tx_packets = packets; 119 stats->tx_packets = packets;
128 stats->rx_dropped = drops;
129 stats->rx_errors = drops;
130 stats->rx_bytes = bytes; 120 stats->rx_bytes = bytes;
131 stats->tx_bytes = bytes; 121 stats->tx_bytes = bytes;
132 return stats; 122 return stats;
@@ -147,22 +137,16 @@ static const struct ethtool_ops loopback_ethtool_ops = {
147 137
148static int loopback_dev_init(struct net_device *dev) 138static int loopback_dev_init(struct net_device *dev)
149{ 139{
150 struct pcpu_lstats __percpu *lstats; 140 dev->lstats = alloc_percpu(struct pcpu_lstats);
151 141 if (!dev->lstats)
152 lstats = alloc_percpu(struct pcpu_lstats);
153 if (!lstats)
154 return -ENOMEM; 142 return -ENOMEM;
155 143
156 dev->ml_priv = (void __force *)lstats;
157 return 0; 144 return 0;
158} 145}
159 146
160static void loopback_dev_free(struct net_device *dev) 147static void loopback_dev_free(struct net_device *dev)
161{ 148{
162 struct pcpu_lstats __percpu *lstats = 149 free_percpu(dev->lstats);
163 (void __percpu __force *)dev->ml_priv;
164
165 free_percpu(lstats);
166 free_netdev(dev); 150 free_netdev(dev);
167} 151}
168 152
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3df046a58b1d..3698824744cb 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -460,7 +460,7 @@ init_rx_bufs(struct net_device *dev, int num) {
460 } 460 }
461 lp->rbd_tail->next = rfd->rbd; 461 lp->rbd_tail->next = rfd->rbd;
462#endif 462#endif
463 return (i); 463 return i;
464} 464}
465 465
466static inline void 466static inline void
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0ef0eb0db945..0fc9dc7f20db 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -788,6 +788,10 @@ static int macvlan_device_event(struct notifier_block *unused,
788 } 788 }
789 break; 789 break;
790 case NETDEV_UNREGISTER: 790 case NETDEV_UNREGISTER:
791 /* twiddle thumbs on netns device moves */
792 if (dev->reg_state != NETREG_UNREGISTERING)
793 break;
794
791 list_for_each_entry_safe(vlan, next, &port->vlans, list) 795 list_for_each_entry_safe(vlan, next, &port->vlans, list)
792 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); 796 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
793 break; 797 break;
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 42e3294671d7..60135aa55802 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -461,7 +461,7 @@ static int meth_tx_full(struct net_device *dev)
461{ 461{
462 struct meth_private *priv = netdev_priv(dev); 462 struct meth_private *priv = netdev_priv(dev);
463 463
464 return (priv->tx_count >= TX_RING_ENTRIES - 1); 464 return priv->tx_count >= TX_RING_ENTRIES - 1;
465} 465}
466 466
467static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) 467static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 411bda581c04..79478bd4211a 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -1025,7 +1025,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1025 */ 1025 */
1026 dev->netdev_ops = &mlx4_netdev_ops; 1026 dev->netdev_ops = &mlx4_netdev_ops;
1027 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 1027 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1028 dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS; 1028 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1029 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1029 1030
1030 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 1031 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1031 1032
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
index 43357d35616a..9c91a92da705 100644
--- a/drivers/net/mlx4/en_selftest.c
+++ b/drivers/net/mlx4/en_selftest.c
@@ -107,7 +107,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
107mlx4_en_test_loopback_exit: 107mlx4_en_test_loopback_exit:
108 108
109 priv->validate_loopback = 0; 109 priv->validate_loopback = 0;
110 return (!loopback_ok); 110 return !loopback_ok;
111} 111}
112 112
113 113
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 98dd620042a8..a680cd4a5ab6 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -583,7 +583,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
583 /* If we support per priority flow control and the packet contains 583 /* If we support per priority flow control and the packet contains
584 * a vlan tag, send the packet to the TX ring assigned to that priority 584 * a vlan tag, send the packet to the TX ring assigned to that priority
585 */ 585 */
586 if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) { 586 if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
587 vlan_tag = vlan_tx_tag_get(skb); 587 vlan_tag = vlan_tx_tag_get(skb);
588 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); 588 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
589 } 589 }
@@ -634,7 +634,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
634 634
635 tx_ind = skb->queue_mapping; 635 tx_ind = skb->queue_mapping;
636 ring = &priv->tx_ring[tx_ind]; 636 ring = &priv->tx_ring[tx_ind];
637 if (priv->vlgrp && vlan_tx_tag_present(skb)) 637 if (vlan_tx_tag_present(skb))
638 vlan_tag = vlan_tx_tag_get(skb); 638 vlan_tag = vlan_tx_tag_get(skb);
639 639
640 /* Check available TXBBs And 2K spare for prefetch */ 640 /* Check available TXBBs And 2K spare for prefetch */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 2d488abcf62d..dd2b6a71c6d7 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2901,7 +2901,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2901 mp->dev = dev; 2901 mp->dev = dev;
2902 2902
2903 set_params(mp, pd); 2903 set_params(mp, pd);
2904 dev->real_num_tx_queues = mp->txq_count; 2904 netif_set_real_num_tx_queues(dev, mp->txq_count);
2905 netif_set_real_num_rx_queues(dev, mp->rxq_count);
2905 2906
2906 if (pd->phy_addr != MV643XX_ETH_PHY_NONE) 2907 if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
2907 mp->phy = phy_scan(mp, pd->phy_addr); 2908 mp->phy = phy_scan(mp, pd->phy_addr);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 24ab8a43c777..8524cc40ec57 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -225,6 +225,7 @@ struct myri10ge_priv {
225 struct msix_entry *msix_vectors; 225 struct msix_entry *msix_vectors;
226#ifdef CONFIG_MYRI10GE_DCA 226#ifdef CONFIG_MYRI10GE_DCA
227 int dca_enabled; 227 int dca_enabled;
228 int relaxed_order;
228#endif 229#endif
229 u32 link_state; 230 u32 link_state;
230 unsigned int rdma_tags_available; 231 unsigned int rdma_tags_available;
@@ -990,7 +991,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
990 * RX queues, so if we get an error, first retry using a 991 * RX queues, so if we get an error, first retry using a
991 * single TX queue before giving up */ 992 * single TX queue before giving up */
992 if (status != 0 && mgp->dev->real_num_tx_queues > 1) { 993 if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
993 mgp->dev->real_num_tx_queues = 1; 994 netif_set_real_num_tx_queues(mgp->dev, 1);
994 cmd.data0 = mgp->num_slices; 995 cmd.data0 = mgp->num_slices;
995 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; 996 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
996 status = myri10ge_send_cmd(mgp, 997 status = myri10ge_send_cmd(mgp,
@@ -1074,10 +1075,28 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
1074} 1075}
1075 1076
1076#ifdef CONFIG_MYRI10GE_DCA 1077#ifdef CONFIG_MYRI10GE_DCA
1078static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
1079{
1080 int ret, cap, err;
1081 u16 ctl;
1082
1083 cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1084 if (!cap)
1085 return 0;
1086
1087 err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
1088 ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
1089 if (ret != on) {
1090 ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
1091 ctl |= (on << 4);
1092 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
1093 }
1094 return ret;
1095}
1096
1077static void 1097static void
1078myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) 1098myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1079{ 1099{
1080 ss->cpu = cpu;
1081 ss->cached_dca_tag = tag; 1100 ss->cached_dca_tag = tag;
1082 put_be32(htonl(tag), ss->dca_tag); 1101 put_be32(htonl(tag), ss->dca_tag);
1083} 1102}
@@ -1088,9 +1107,10 @@ static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1088 int tag; 1107 int tag;
1089 1108
1090 if (cpu != ss->cpu) { 1109 if (cpu != ss->cpu) {
1091 tag = dca_get_tag(cpu); 1110 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
1092 if (ss->cached_dca_tag != tag) 1111 if (ss->cached_dca_tag != tag)
1093 myri10ge_write_dca(ss, cpu, tag); 1112 myri10ge_write_dca(ss, cpu, tag);
1113 ss->cpu = cpu;
1094 } 1114 }
1095 put_cpu(); 1115 put_cpu();
1096} 1116}
@@ -1113,9 +1133,13 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1113 "dca_add_requester() failed, err=%d\n", err); 1133 "dca_add_requester() failed, err=%d\n", err);
1114 return; 1134 return;
1115 } 1135 }
1136 mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
1116 mgp->dca_enabled = 1; 1137 mgp->dca_enabled = 1;
1117 for (i = 0; i < mgp->num_slices; i++) 1138 for (i = 0; i < mgp->num_slices; i++) {
1118 myri10ge_write_dca(&mgp->ss[i], -1, 0); 1139 mgp->ss[i].cpu = -1;
1140 mgp->ss[i].cached_dca_tag = -1;
1141 myri10ge_update_dca(&mgp->ss[i]);
1142 }
1119} 1143}
1120 1144
1121static void myri10ge_teardown_dca(struct myri10ge_priv *mgp) 1145static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
@@ -1126,6 +1150,8 @@ static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
1126 if (!mgp->dca_enabled) 1150 if (!mgp->dca_enabled)
1127 return; 1151 return;
1128 mgp->dca_enabled = 0; 1152 mgp->dca_enabled = 0;
1153 if (mgp->relaxed_order)
1154 myri10ge_toggle_relaxed(pdev, 1);
1129 err = dca_remove_requester(&pdev->dev); 1155 err = dca_remove_requester(&pdev->dev);
1130} 1156}
1131 1157
@@ -1555,12 +1581,12 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1555 * valid since MSI-X irqs are not shared */ 1581 * valid since MSI-X irqs are not shared */
1556 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { 1582 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1557 napi_schedule(&ss->napi); 1583 napi_schedule(&ss->napi);
1558 return (IRQ_HANDLED); 1584 return IRQ_HANDLED;
1559 } 1585 }
1560 1586
1561 /* make sure it is our IRQ, and that the DMA has finished */ 1587 /* make sure it is our IRQ, and that the DMA has finished */
1562 if (unlikely(!stats->valid)) 1588 if (unlikely(!stats->valid))
1563 return (IRQ_NONE); 1589 return IRQ_NONE;
1564 1590
1565 /* low bit indicates receives are present, so schedule 1591 /* low bit indicates receives are present, so schedule
1566 * napi poll handler */ 1592 * napi poll handler */
@@ -1599,7 +1625,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1599 myri10ge_check_statblock(mgp); 1625 myri10ge_check_statblock(mgp);
1600 1626
1601 put_be32(htonl(3), ss->irq_claim + 1); 1627 put_be32(htonl(3), ss->irq_claim + 1);
1602 return (IRQ_HANDLED); 1628 return IRQ_HANDLED;
1603} 1629}
1604 1630
1605static int 1631static int
@@ -3923,7 +3949,8 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3923 dev_err(&pdev->dev, "failed to alloc slice state\n"); 3949 dev_err(&pdev->dev, "failed to alloc slice state\n");
3924 goto abort_with_firmware; 3950 goto abort_with_firmware;
3925 } 3951 }
3926 netdev->real_num_tx_queues = mgp->num_slices; 3952 netif_set_real_num_tx_queues(netdev, mgp->num_slices);
3953 netif_set_real_num_rx_queues(netdev, mgp->num_slices);
3927 status = myri10ge_reset(mgp); 3954 status = myri10ge_reset(mgp);
3928 if (status != 0) { 3955 if (status != 0) {
3929 dev_err(&pdev->dev, "failed reset\n"); 3956 dev_err(&pdev->dev, "failed reset\n");
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 617f898ba5f0..4846e131a04e 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -735,7 +735,7 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
735 int i; 735 int i;
736 for (i = 0; i < dev->addr_len; i++) 736 for (i = 0; i < dev->addr_len; i++)
737 eth->h_dest[i] = 0; 737 eth->h_dest[i] = 0;
738 return(dev->hard_header_len); 738 return dev->hard_header_len;
739 } 739 }
740 740
741 if (daddr) { 741 if (daddr) {
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index ca142c47b2e4..94255f09093d 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -678,7 +678,14 @@ static int netconsole_netdev_event(struct notifier_block *this,
678 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); 678 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
679 break; 679 break;
680 case NETDEV_UNREGISTER: 680 case NETDEV_UNREGISTER:
681 netpoll_cleanup(&nt->np); 681 /*
682 * rtnl_lock already held
683 */
684 if (nt->np.dev) {
685 __netpoll_cleanup(&nt->np);
686 dev_put(nt->np.dev);
687 nt->np.dev = NULL;
688 }
682 /* Fall through */ 689 /* Fall through */
683 case NETDEV_GOING_DOWN: 690 case NETDEV_GOING_DOWN:
684 case NETDEV_BONDING_DESLAVE: 691 case NETDEV_BONDING_DESLAVE:
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 6dca3574e355..8e8a97839cb0 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -175,7 +175,10 @@
175#define MAX_NUM_CARDS 4 175#define MAX_NUM_CARDS 4
176 176
177#define MAX_BUFFERS_PER_CMD 32 177#define MAX_BUFFERS_PER_CMD 32
178#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4) 178#define MAX_TSO_HEADER_DESC 2
179#define MGMT_CMD_DESC_RESV 4
180#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
181 + MGMT_CMD_DESC_RESV)
179#define NX_MAX_TX_TIMEOUTS 2 182#define NX_MAX_TX_TIMEOUTS 2
180 183
181/* 184/*
@@ -1253,19 +1256,9 @@ struct netxen_adapter {
1253 const struct firmware *fw; 1256 const struct firmware *fw;
1254}; 1257};
1255 1258
1256int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port);
1257int netxen_niu_disable_xg_port(struct netxen_adapter *adapter);
1258
1259int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val); 1259int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
1260int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val); 1260int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val);
1261 1261
1262/* Functions available from netxen_nic_hw.c */
1263int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
1264int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu);
1265
1266int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr);
1267int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr);
1268
1269#define NXRD32(adapter, off) \ 1262#define NXRD32(adapter, off) \
1270 (adapter->crb_read(adapter, off)) 1263 (adapter->crb_read(adapter, off))
1271#define NXWR32(adapter, off, val) \ 1264#define NXWR32(adapter, off, val) \
@@ -1345,11 +1338,8 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1345 struct nx_host_rds_ring *rds_ring); 1338 struct nx_host_rds_ring *rds_ring);
1346int netxen_process_cmd_ring(struct netxen_adapter *adapter); 1339int netxen_process_cmd_ring(struct netxen_adapter *adapter);
1347int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); 1340int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max);
1348void netxen_p2_nic_set_multi(struct net_device *netdev); 1341
1349void netxen_p3_nic_set_multi(struct net_device *netdev);
1350void netxen_p3_free_mac_list(struct netxen_adapter *adapter); 1342void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
1351int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode);
1352int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
1353int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1343int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
1354int netxen_config_rss(struct netxen_adapter *adapter, int enable); 1344int netxen_config_rss(struct netxen_adapter *adapter, int enable);
1355int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd); 1345int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
@@ -1364,9 +1354,6 @@ int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
1364int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable); 1354int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
1365int netxen_send_lro_cleanup(struct netxen_adapter *adapter); 1355int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
1366 1356
1367int netxen_nic_set_mac(struct net_device *netdev, void *p);
1368struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
1369
1370void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 1357void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
1371 struct nx_host_tx_ring *tx_ring); 1358 struct nx_host_tx_ring *tx_ring);
1372 1359
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 29d7b93d0493..37d3ebd65be8 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -319,6 +319,8 @@ static unsigned crb_hub_agt[64] =
319 319
320#define NETXEN_PCIE_SEM_TIMEOUT 10000 320#define NETXEN_PCIE_SEM_TIMEOUT 10000
321 321
322static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
323
322int 324int
323netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg) 325netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
324{ 326{
@@ -345,7 +347,7 @@ netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
345 NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); 347 NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
346} 348}
347 349
348int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) 350static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
349{ 351{
350 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 352 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
351 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447); 353 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
@@ -356,7 +358,7 @@ int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
356} 358}
357 359
358/* Disable an XG interface */ 360/* Disable an XG interface */
359int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) 361static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
360{ 362{
361 __u32 mac_cfg; 363 __u32 mac_cfg;
362 u32 port = adapter->physical_port; 364 u32 port = adapter->physical_port;
@@ -383,7 +385,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
383#define MAC_LO(addr) \ 385#define MAC_LO(addr) \
384 ((addr[5] << 16) | (addr[4] << 8) | (addr[3])) 386 ((addr[5] << 16) | (addr[4] << 8) | (addr[3]))
385 387
386int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) 388static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
387{ 389{
388 u32 mac_cfg; 390 u32 mac_cfg;
389 u32 cnt = 0; 391 u32 cnt = 0;
@@ -434,7 +436,7 @@ int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
434 return 0; 436 return 0;
435} 437}
436 438
437int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) 439static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
438{ 440{
439 u32 mac_hi, mac_lo; 441 u32 mac_hi, mac_lo;
440 u32 reg_hi, reg_lo; 442 u32 reg_hi, reg_lo;
@@ -531,7 +533,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
531 return 0; 533 return 0;
532} 534}
533 535
534void netxen_p2_nic_set_multi(struct net_device *netdev) 536static void netxen_p2_nic_set_multi(struct net_device *netdev)
535{ 537{
536 struct netxen_adapter *adapter = netdev_priv(netdev); 538 struct netxen_adapter *adapter = netdev_priv(netdev);
537 struct netdev_hw_addr *ha; 539 struct netdev_hw_addr *ha;
@@ -598,8 +600,14 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
598 600
599 if (nr_desc >= netxen_tx_avail(tx_ring)) { 601 if (nr_desc >= netxen_tx_avail(tx_ring)) {
600 netif_tx_stop_queue(tx_ring->txq); 602 netif_tx_stop_queue(tx_ring->txq);
601 __netif_tx_unlock_bh(tx_ring->txq); 603 smp_mb();
602 return -EBUSY; 604 if (netxen_tx_avail(tx_ring) > nr_desc) {
605 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
606 netif_tx_wake_queue(tx_ring->txq);
607 } else {
608 __netif_tx_unlock_bh(tx_ring->txq);
609 return -EBUSY;
610 }
603 } 611 }
604 612
605 do { 613 do {
@@ -674,7 +682,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
674 cur->mac_addr, NETXEN_MAC_ADD); 682 cur->mac_addr, NETXEN_MAC_ADD);
675} 683}
676 684
677void netxen_p3_nic_set_multi(struct net_device *netdev) 685static void netxen_p3_nic_set_multi(struct net_device *netdev)
678{ 686{
679 struct netxen_adapter *adapter = netdev_priv(netdev); 687 struct netxen_adapter *adapter = netdev_priv(netdev);
680 struct netdev_hw_addr *ha; 688 struct netdev_hw_addr *ha;
@@ -721,7 +729,7 @@ send_fw_cmd:
721 } 729 }
722} 730}
723 731
724int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) 732static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
725{ 733{
726 nx_nic_req_t req; 734 nx_nic_req_t req;
727 u64 word; 735 u64 word;
@@ -754,7 +762,7 @@ void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
754 } 762 }
755} 763}
756 764
757int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) 765static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
758{ 766{
759 /* assuming caller has already copied new addr to netdev */ 767 /* assuming caller has already copied new addr to netdev */
760 netxen_p3_nic_set_multi(adapter->netdev); 768 netxen_p3_nic_set_multi(adapter->netdev);
@@ -1816,14 +1824,14 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
1816 if (netxen_rom_fast_read(adapter, offset, &board_type)) 1824 if (netxen_rom_fast_read(adapter, offset, &board_type))
1817 return -EIO; 1825 return -EIO;
1818 1826
1819 adapter->ahw.board_type = board_type;
1820
1821 if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) { 1827 if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
1822 u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I); 1828 u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
1823 if ((gpio & 0x8000) == 0) 1829 if ((gpio & 0x8000) == 0)
1824 board_type = NETXEN_BRDTYPE_P3_10G_TP; 1830 board_type = NETXEN_BRDTYPE_P3_10G_TP;
1825 } 1831 }
1826 1832
1833 adapter->ahw.board_type = board_type;
1834
1827 switch (board_type) { 1835 switch (board_type) {
1828 case NETXEN_BRDTYPE_P2_SB35_4G: 1836 case NETXEN_BRDTYPE_P2_SB35_4G:
1829 adapter->ahw.port_type = NETXEN_NIC_GBE; 1837 adapter->ahw.port_type = NETXEN_NIC_GBE;
@@ -1867,16 +1875,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
1867} 1875}
1868 1876
1869/* NIU access sections */ 1877/* NIU access sections */
1870 1878static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
1871int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu)
1872{
1873 new_mtu += MTU_FUDGE_FACTOR;
1874 NXWR32(adapter, NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port),
1875 new_mtu);
1876 return 0;
1877}
1878
1879int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
1880{ 1879{
1881 new_mtu += MTU_FUDGE_FACTOR; 1880 new_mtu += MTU_FUDGE_FACTOR;
1882 if (adapter->physical_port == 0) 1881 if (adapter->physical_port == 0)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index cabae7bb1fc6..95fe552aa279 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -346,7 +346,7 @@ static u32 netxen_decode_crb_addr(u32 addr)
346 if (pci_base == NETXEN_ADDR_ERROR) 346 if (pci_base == NETXEN_ADDR_ERROR)
347 return pci_base; 347 return pci_base;
348 else 348 else
349 return (pci_base + offset); 349 return pci_base + offset;
350} 350}
351 351
352#define NETXEN_MAX_ROM_WAIT_USEC 100 352#define NETXEN_MAX_ROM_WAIT_USEC 100
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
1540 if (pkt_offset) 1540 if (pkt_offset)
1541 skb_pull(skb, pkt_offset); 1541 skb_pull(skb, pkt_offset);
1542 1542
1543 skb->truesize = skb->len + sizeof(struct sk_buff);
1544 skb->protocol = eth_type_trans(skb, netdev); 1543 skb->protocol = eth_type_trans(skb, netdev);
1545 1544
1546 napi_gro_receive(&sds_ring->napi, skb); 1545 napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
1602 1601
1603 skb_put(skb, lro_length + data_offset); 1602 skb_put(skb, lro_length + data_offset);
1604 1603
1605 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1606
1607 skb_pull(skb, l2_hdr_offset); 1604 skb_pull(skb, l2_hdr_offset);
1608 skb->protocol = eth_type_trans(skb, netdev); 1605 skb->protocol = eth_type_trans(skb, netdev);
1609 1606
@@ -1766,14 +1763,10 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1766 1763
1767 smp_mb(); 1764 smp_mb();
1768 1765
1769 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { 1766 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
1770 __netif_tx_lock(tx_ring->txq, smp_processor_id()); 1767 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
1771 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
1772 netif_wake_queue(netdev); 1768 netif_wake_queue(netdev);
1773 adapter->tx_timeo_cnt = 0; 1769 adapter->tx_timeo_cnt = 0;
1774 }
1775 __netif_tx_unlock(tx_ring->txq);
1776 }
1777 } 1770 }
1778 /* 1771 /*
1779 * If everything is freed up to consumer then check if the ring is full 1772 * If everything is freed up to consumer then check if the ring is full
@@ -1792,7 +1785,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1792 done = (sw_consumer == hw_consumer); 1785 done = (sw_consumer == hw_consumer);
1793 spin_unlock(&adapter->tx_clean_lock); 1786 spin_unlock(&adapter->tx_clean_lock);
1794 1787
1795 return (done); 1788 return done;
1796} 1789}
1797 1790
1798void 1791void
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 73d314592230..50820beac3aa 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -95,6 +95,8 @@ static irqreturn_t netxen_msi_intr(int irq, void *data);
95static irqreturn_t netxen_msix_intr(int irq, void *data); 95static irqreturn_t netxen_msix_intr(int irq, void *data);
96 96
97static void netxen_config_indev_addr(struct net_device *dev, unsigned long); 97static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
98static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
99static int netxen_nic_set_mac(struct net_device *netdev, void *p);
98 100
99/* PCI Device ID Table */ 101/* PCI Device ID Table */
100#define ENTRY(device) \ 102#define ENTRY(device) \
@@ -125,11 +127,6 @@ netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
125 struct nx_host_tx_ring *tx_ring) 127 struct nx_host_tx_ring *tx_ring)
126{ 128{
127 NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer); 129 NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
128
129 if (netxen_tx_avail(tx_ring) <= TX_STOP_THRESH) {
130 netif_stop_queue(adapter->netdev);
131 smp_mb();
132 }
133} 130}
134 131
135static uint32_t crb_cmd_consumer[4] = { 132static uint32_t crb_cmd_consumer[4] = {
@@ -177,7 +174,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
177 174
178 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); 175 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
179 176
180 return (recv_ctx->sds_rings == NULL); 177 return recv_ctx->sds_rings == NULL;
181} 178}
182 179
183static void 180static void
@@ -460,7 +457,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
460 return 0; 457 return 0;
461} 458}
462 459
463int netxen_nic_set_mac(struct net_device *netdev, void *p) 460static int netxen_nic_set_mac(struct net_device *netdev, void *p)
464{ 461{
465 struct netxen_adapter *adapter = netdev_priv(netdev); 462 struct netxen_adapter *adapter = netdev_priv(netdev);
466 struct sockaddr *addr = p; 463 struct sockaddr *addr = p;
@@ -1209,7 +1206,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1209 adapter->max_mc_count = 16; 1206 adapter->max_mc_count = 16;
1210 1207
1211 netdev->netdev_ops = &netxen_netdev_ops; 1208 netdev->netdev_ops = &netxen_netdev_ops;
1212 netdev->watchdog_timeo = 2*HZ; 1209 netdev->watchdog_timeo = 5*HZ;
1213 1210
1214 netxen_nic_change_mtu(netdev, netdev->mtu); 1211 netxen_nic_change_mtu(netdev, netdev->mtu);
1215 1212
@@ -1254,6 +1251,28 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
1254 return 0; 1251 return 0;
1255} 1252}
1256 1253
1254#ifdef CONFIG_PCIEAER
1255static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
1256{
1257 struct pci_dev *pdev = adapter->pdev;
1258 struct pci_dev *root = pdev->bus->self;
1259 u32 aer_pos;
1260
1261 if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
1262 adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
1263 return;
1264
1265 if (root->pcie_type != PCI_EXP_TYPE_ROOT_PORT)
1266 return;
1267
1268 aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
1269 if (!aer_pos)
1270 return;
1271
1272 pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
1273}
1274#endif
1275
1257static int __devinit 1276static int __devinit
1258netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1277netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1259{ 1278{
@@ -1322,6 +1341,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1322 goto err_out_iounmap; 1341 goto err_out_iounmap;
1323 } 1342 }
1324 1343
1344#ifdef CONFIG_PCIEAER
1345 netxen_mask_aer_correctable(adapter);
1346#endif
1347
1325 /* Mezz cards have PCI function 0,2,3 enabled */ 1348 /* Mezz cards have PCI function 0,2,3 enabled */
1326 switch (adapter->ahw.board_type) { 1349 switch (adapter->ahw.board_type) {
1327 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ: 1350 case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
@@ -1825,9 +1848,13 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1825 /* 4 fragments per cmd des */ 1848 /* 4 fragments per cmd des */
1826 no_of_desc = (frag_count + 3) >> 2; 1849 no_of_desc = (frag_count + 3) >> 2;
1827 1850
1828 if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { 1851 if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1829 netif_stop_queue(netdev); 1852 netif_stop_queue(netdev);
1830 return NETDEV_TX_BUSY; 1853 smp_mb();
1854 if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
1855 netif_start_queue(netdev);
1856 else
1857 return NETDEV_TX_BUSY;
1831 } 1858 }
1832 1859
1833 producer = tx_ring->producer; 1860 producer = tx_ring->producer;
@@ -2027,7 +2054,7 @@ request_reset:
2027 clear_bit(__NX_RESETTING, &adapter->state); 2054 clear_bit(__NX_RESETTING, &adapter->state);
2028} 2055}
2029 2056
2030struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 2057static struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
2031{ 2058{
2032 struct netxen_adapter *adapter = netdev_priv(netdev); 2059 struct netxen_adapter *adapter = netdev_priv(netdev);
2033 struct net_device_stats *stats = &netdev->stats; 2060 struct net_device_stats *stats = &netdev->stats;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8e1859c801a4..781e368329f9 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -283,7 +283,7 @@ static int niu_enable_interrupts(struct niu *np, int on)
283 283
284static u32 phy_encode(u32 type, int port) 284static u32 phy_encode(u32 type, int port)
285{ 285{
286 return (type << (port * 2)); 286 return type << (port * 2);
287} 287}
288 288
289static u32 phy_decode(u32 val, int port) 289static u32 phy_decode(u32 val, int port)
@@ -3043,8 +3043,7 @@ static int tcam_flush_all(struct niu *np)
3043 3043
3044static u64 hash_addr_regval(unsigned long index, unsigned long num_entries) 3044static u64 hash_addr_regval(unsigned long index, unsigned long num_entries)
3045{ 3045{
3046 return ((u64)index | (num_entries == 1 ? 3046 return (u64)index | (num_entries == 1 ? HASH_TBL_ADDR_AUTOINC : 0);
3047 HASH_TBL_ADDR_AUTOINC : 0));
3048} 3047}
3049 3048
3050#if 0 3049#if 0
@@ -3276,7 +3275,7 @@ static u16 tcam_get_index(struct niu *np, u16 idx)
3276 /* One entry reserved for IP fragment rule */ 3275 /* One entry reserved for IP fragment rule */
3277 if (idx >= (np->clas.tcam_sz - 1)) 3276 if (idx >= (np->clas.tcam_sz - 1))
3278 idx = 0; 3277 idx = 0;
3279 return (np->clas.tcam_top + ((idx+1) * np->parent->num_ports)); 3278 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports);
3280} 3279}
3281 3280
3282static u16 tcam_get_size(struct niu *np) 3281static u16 tcam_get_size(struct niu *np)
@@ -3313,7 +3312,7 @@ static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
3313 a >>= PAGE_SHIFT; 3312 a >>= PAGE_SHIFT;
3314 a ^= (a >> ilog2(MAX_RBR_RING_SIZE)); 3313 a ^= (a >> ilog2(MAX_RBR_RING_SIZE));
3315 3314
3316 return (a & (MAX_RBR_RING_SIZE - 1)); 3315 return a & (MAX_RBR_RING_SIZE - 1);
3317} 3316}
3318 3317
3319static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr, 3318static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
@@ -4502,7 +4501,8 @@ static int niu_alloc_channels(struct niu *np)
4502 np->num_rx_rings = parent->rxchan_per_port[port]; 4501 np->num_rx_rings = parent->rxchan_per_port[port];
4503 np->num_tx_rings = parent->txchan_per_port[port]; 4502 np->num_tx_rings = parent->txchan_per_port[port];
4504 4503
4505 np->dev->real_num_tx_queues = np->num_tx_rings; 4504 netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
4505 netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
4506 4506
4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info), 4507 np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
4508 GFP_KERNEL); 4508 GFP_KERNEL);
@@ -7090,24 +7090,20 @@ static int niu_get_hash_opts(struct niu *np, struct ethtool_rxnfc *nfc)
7090static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp, 7090static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
7091 struct ethtool_rx_flow_spec *fsp) 7091 struct ethtool_rx_flow_spec *fsp)
7092{ 7092{
7093 u32 tmp;
7094 u16 prt;
7093 7095
7094 fsp->h_u.tcp_ip4_spec.ip4src = (tp->key[3] & TCAM_V4KEY3_SADDR) >> 7096 tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7095 TCAM_V4KEY3_SADDR_SHIFT; 7097 fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7096 fsp->h_u.tcp_ip4_spec.ip4dst = (tp->key[3] & TCAM_V4KEY3_DADDR) >> 7098
7097 TCAM_V4KEY3_DADDR_SHIFT; 7099 tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7098 fsp->m_u.tcp_ip4_spec.ip4src = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> 7100 fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7099 TCAM_V4KEY3_SADDR_SHIFT; 7101
7100 fsp->m_u.tcp_ip4_spec.ip4dst = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> 7102 tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT;
7101 TCAM_V4KEY3_DADDR_SHIFT; 7103 fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp);
7102 7104
7103 fsp->h_u.tcp_ip4_spec.ip4src = 7105 tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT;
7104 cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4src); 7106 fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp);
7105 fsp->m_u.tcp_ip4_spec.ip4src =
7106 cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4src);
7107 fsp->h_u.tcp_ip4_spec.ip4dst =
7108 cpu_to_be32(fsp->h_u.tcp_ip4_spec.ip4dst);
7109 fsp->m_u.tcp_ip4_spec.ip4dst =
7110 cpu_to_be32(fsp->m_u.tcp_ip4_spec.ip4dst);
7111 7107
7112 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> 7108 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >>
7113 TCAM_V4KEY2_TOS_SHIFT; 7109 TCAM_V4KEY2_TOS_SHIFT;
@@ -7118,54 +7114,40 @@ static void niu_get_ip4fs_from_tcam_key(struct niu_tcam_entry *tp,
7118 case TCP_V4_FLOW: 7114 case TCP_V4_FLOW:
7119 case UDP_V4_FLOW: 7115 case UDP_V4_FLOW:
7120 case SCTP_V4_FLOW: 7116 case SCTP_V4_FLOW:
7121 fsp->h_u.tcp_ip4_spec.psrc = 7117 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7122 ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> 7118 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7123 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16; 7119 fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7124 fsp->h_u.tcp_ip4_spec.pdst =
7125 ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7126 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7127 fsp->m_u.tcp_ip4_spec.psrc =
7128 ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7129 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7130 fsp->m_u.tcp_ip4_spec.pdst =
7131 ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7132 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7133 7120
7134 fsp->h_u.tcp_ip4_spec.psrc = 7121 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7135 cpu_to_be16(fsp->h_u.tcp_ip4_spec.psrc); 7122 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7136 fsp->h_u.tcp_ip4_spec.pdst = 7123 fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7137 cpu_to_be16(fsp->h_u.tcp_ip4_spec.pdst); 7124
7138 fsp->m_u.tcp_ip4_spec.psrc = 7125 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7139 cpu_to_be16(fsp->m_u.tcp_ip4_spec.psrc); 7126 TCAM_V4KEY2_PORT_SPI_SHIFT) >> 16;
7140 fsp->m_u.tcp_ip4_spec.pdst = 7127 fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt);
7141 cpu_to_be16(fsp->m_u.tcp_ip4_spec.pdst); 7128
7129 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7130 TCAM_V4KEY2_PORT_SPI_SHIFT) & 0xffff;
7131 fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt);
7142 break; 7132 break;
7143 case AH_V4_FLOW: 7133 case AH_V4_FLOW:
7144 case ESP_V4_FLOW: 7134 case ESP_V4_FLOW:
7145 fsp->h_u.ah_ip4_spec.spi = 7135 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7146 (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7147 TCAM_V4KEY2_PORT_SPI_SHIFT;
7148 fsp->m_u.ah_ip4_spec.spi =
7149 (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7150 TCAM_V4KEY2_PORT_SPI_SHIFT; 7136 TCAM_V4KEY2_PORT_SPI_SHIFT;
7137 fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7151 7138
7152 fsp->h_u.ah_ip4_spec.spi = 7139 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7153 cpu_to_be32(fsp->h_u.ah_ip4_spec.spi); 7140 TCAM_V4KEY2_PORT_SPI_SHIFT;
7154 fsp->m_u.ah_ip4_spec.spi = 7141 fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp);
7155 cpu_to_be32(fsp->m_u.ah_ip4_spec.spi);
7156 break; 7142 break;
7157 case IP_USER_FLOW: 7143 case IP_USER_FLOW:
7158 fsp->h_u.usr_ip4_spec.l4_4_bytes = 7144 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7159 (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >>
7160 TCAM_V4KEY2_PORT_SPI_SHIFT;
7161 fsp->m_u.usr_ip4_spec.l4_4_bytes =
7162 (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7163 TCAM_V4KEY2_PORT_SPI_SHIFT; 7145 TCAM_V4KEY2_PORT_SPI_SHIFT;
7146 fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7164 7147
7165 fsp->h_u.usr_ip4_spec.l4_4_bytes = 7148 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >>
7166 cpu_to_be32(fsp->h_u.usr_ip4_spec.l4_4_bytes); 7149 TCAM_V4KEY2_PORT_SPI_SHIFT;
7167 fsp->m_u.usr_ip4_spec.l4_4_bytes = 7150 fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp);
7168 cpu_to_be32(fsp->m_u.usr_ip4_spec.l4_4_bytes);
7169 7151
7170 fsp->h_u.usr_ip4_spec.proto = 7152 fsp->h_u.usr_ip4_spec.proto =
7171 (tp->key[2] & TCAM_V4KEY2_PROTO) >> 7153 (tp->key[2] & TCAM_V4KEY2_PROTO) >>
@@ -7462,10 +7444,12 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7462 if (fsp->flow_type == IP_USER_FLOW) { 7444 if (fsp->flow_type == IP_USER_FLOW) {
7463 int i; 7445 int i;
7464 int add_usr_cls = 0; 7446 int add_usr_cls = 0;
7465 int ipv6 = 0;
7466 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; 7447 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec;
7467 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; 7448 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec;
7468 7449
7450 if (uspec->ip_ver != ETH_RX_NFC_IP4)
7451 return -EINVAL;
7452
7469 niu_lock_parent(np, flags); 7453 niu_lock_parent(np, flags);
7470 7454
7471 for (i = 0; i < NIU_L3_PROG_CLS; i++) { 7455 for (i = 0; i < NIU_L3_PROG_CLS; i++) {
@@ -7494,9 +7478,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7494 default: 7478 default:
7495 break; 7479 break;
7496 } 7480 }
7497 if (uspec->ip_ver == ETH_RX_NFC_IP6) 7481 ret = tcam_user_ip_class_set(np, class, 0,
7498 ipv6 = 1;
7499 ret = tcam_user_ip_class_set(np, class, ipv6,
7500 uspec->proto, 7482 uspec->proto,
7501 uspec->tos, 7483 uspec->tos,
7502 umask->tos); 7484 umask->tos);
@@ -7553,16 +7535,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
7553 ret = -EINVAL; 7535 ret = -EINVAL;
7554 goto out; 7536 goto out;
7555 case IP_USER_FLOW: 7537 case IP_USER_FLOW:
7556 if (fsp->h_u.usr_ip4_spec.ip_ver == ETH_RX_NFC_IP4) { 7538 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table, class);
7557 niu_get_tcamkey_from_ip4fs(fsp, tp, l2_rdc_table,
7558 class);
7559 } else {
7560 /* Not yet implemented */
7561 netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
7562 parent->index, __func__);
7563 ret = -EINVAL;
7564 goto out;
7565 }
7566 break; 7539 break;
7567 default: 7540 default:
7568 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", 7541 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
@@ -7805,11 +7778,11 @@ static int niu_get_sset_count(struct net_device *dev, int stringset)
7805 if (stringset != ETH_SS_STATS) 7778 if (stringset != ETH_SS_STATS)
7806 return -EINVAL; 7779 return -EINVAL;
7807 7780
7808 return ((np->flags & NIU_FLAGS_XMAC ? 7781 return (np->flags & NIU_FLAGS_XMAC ?
7809 NUM_XMAC_STAT_KEYS : 7782 NUM_XMAC_STAT_KEYS :
7810 NUM_BMAC_STAT_KEYS) + 7783 NUM_BMAC_STAT_KEYS) +
7811 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + 7784 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) +
7812 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS)); 7785 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS);
7813} 7786}
7814 7787
7815static void niu_get_ethtool_stats(struct net_device *dev, 7788static void niu_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 3bbd0aab17e8..84134c766f3a 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -772,7 +772,7 @@ static int ns83820_setup_rx(struct net_device *ndev)
772 phy_intr(ndev); 772 phy_intr(ndev);
773 773
774 /* Okay, let it rip */ 774 /* Okay, let it rip */
775 spin_lock_irq(&dev->misc_lock); 775 spin_lock(&dev->misc_lock);
776 dev->IMR_cache |= ISR_PHY; 776 dev->IMR_cache |= ISR_PHY;
777 dev->IMR_cache |= ISR_RXRCMP; 777 dev->IMR_cache |= ISR_RXRCMP;
778 //dev->IMR_cache |= ISR_RXERR; 778 //dev->IMR_cache |= ISR_RXERR;
diff --git a/drivers/net/pch_gbe/Makefile b/drivers/net/pch_gbe/Makefile
new file mode 100644
index 000000000000..31288d4ad248
--- /dev/null
+++ b/drivers/net/pch_gbe/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_PCH_GBE) += pch_gbe.o
2
3pch_gbe-y := pch_gbe_phy.o pch_gbe_ethtool.o pch_gbe_param.o
4pch_gbe-y += pch_gbe_api.o pch_gbe_main.o
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
new file mode 100644
index 000000000000..a0c26a99520f
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -0,0 +1,659 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#ifndef _PCH_GBE_H_
22#define _PCH_GBE_H_
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/mii.h>
27#include <linux/delay.h>
28#include <linux/pci.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/ethtool.h>
32#include <linux/vmalloc.h>
33#include <net/ip.h>
34#include <net/tcp.h>
35#include <net/udp.h>
36
37/**
38 * pch_gbe_regs_mac_adr - Structure holding values of mac address registers
39 * @high Denotes the 1st to 4th byte from the initial of MAC address
40 * @low Denotes the 5th to 6th byte from the initial of MAC address
41 */
42struct pch_gbe_regs_mac_adr {
43 u32 high;
44 u32 low;
45};
46/**
47 * pch_udc_regs - Structure holding values of MAC registers
48 */
49struct pch_gbe_regs {
50 u32 INT_ST;
51 u32 INT_EN;
52 u32 MODE;
53 u32 RESET;
54 u32 TCPIP_ACC;
55 u32 EX_LIST;
56 u32 INT_ST_HOLD;
57 u32 PHY_INT_CTRL;
58 u32 MAC_RX_EN;
59 u32 RX_FCTRL;
60 u32 PAUSE_REQ;
61 u32 RX_MODE;
62 u32 TX_MODE;
63 u32 RX_FIFO_ST;
64 u32 TX_FIFO_ST;
65 u32 TX_FID;
66 u32 TX_RESULT;
67 u32 PAUSE_PKT1;
68 u32 PAUSE_PKT2;
69 u32 PAUSE_PKT3;
70 u32 PAUSE_PKT4;
71 u32 PAUSE_PKT5;
72 u32 reserve[2];
73 struct pch_gbe_regs_mac_adr mac_adr[16];
74 u32 ADDR_MASK;
75 u32 MIIM;
76 u32 reserve2;
77 u32 RGMII_ST;
78 u32 RGMII_CTRL;
79 u32 reserve3[3];
80 u32 DMA_CTRL;
81 u32 reserve4[3];
82 u32 RX_DSC_BASE;
83 u32 RX_DSC_SIZE;
84 u32 RX_DSC_HW_P;
85 u32 RX_DSC_HW_P_HLD;
86 u32 RX_DSC_SW_P;
87 u32 reserve5[3];
88 u32 TX_DSC_BASE;
89 u32 TX_DSC_SIZE;
90 u32 TX_DSC_HW_P;
91 u32 TX_DSC_HW_P_HLD;
92 u32 TX_DSC_SW_P;
93 u32 reserve6[3];
94 u32 RX_DMA_ST;
95 u32 TX_DMA_ST;
96 u32 reserve7[2];
97 u32 WOL_ST;
98 u32 WOL_CTRL;
99 u32 WOL_ADDR_MASK;
100};
101
102/* Interrupt Status */
103/* Interrupt Status Hold */
104/* Interrupt Enable */
105#define PCH_GBE_INT_RX_DMA_CMPLT 0x00000001 /* Receive DMA Transfer Complete */
106#define PCH_GBE_INT_RX_VALID 0x00000002 /* MAC Normal Receive Complete */
107#define PCH_GBE_INT_RX_FRAME_ERR 0x00000004 /* Receive frame error */
108#define PCH_GBE_INT_RX_FIFO_ERR 0x00000008 /* Receive FIFO Overflow */
109#define PCH_GBE_INT_RX_DMA_ERR 0x00000010 /* Receive DMA Transfer Error */
110#define PCH_GBE_INT_RX_DSC_EMP 0x00000020 /* Receive Descriptor Empty */
111#define PCH_GBE_INT_TX_CMPLT 0x00000100 /* MAC Transmission Complete */
112#define PCH_GBE_INT_TX_DMA_CMPLT 0x00000200 /* DMA Transfer Complete */
113#define PCH_GBE_INT_TX_FIFO_ERR 0x00000400 /* Transmission FIFO underflow. */
114#define PCH_GBE_INT_TX_DMA_ERR 0x00000800 /* Transmission DMA Error */
115#define PCH_GBE_INT_PAUSE_CMPLT 0x00001000 /* Pause Transmission complete */
116#define PCH_GBE_INT_MIIM_CMPLT 0x00010000 /* MIIM I/F Read completion */
117#define PCH_GBE_INT_PHY_INT 0x00100000 /* Interruption from PHY */
118#define PCH_GBE_INT_WOL_DET 0x01000000 /* Wake On LAN Event detection. */
119#define PCH_GBE_INT_TCPIP_ERR 0x10000000 /* TCP/IP Accelerator Error */
120
121/* Mode */
122#define PCH_GBE_MODE_MII_ETHER 0x00000000 /* GIGA Ethernet Mode [MII] */
123#define PCH_GBE_MODE_GMII_ETHER 0x80000000 /* GIGA Ethernet Mode [GMII] */
124#define PCH_GBE_MODE_HALF_DUPLEX 0x00000000 /* Duplex Mode [half duplex] */
125#define PCH_GBE_MODE_FULL_DUPLEX 0x40000000 /* Duplex Mode [full duplex] */
126#define PCH_GBE_MODE_FR_BST 0x04000000 /* Frame bursting is done */
127
128/* Reset */
129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
130#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */
131#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */
132
133/* TCP/IP Accelerator Control */
#define PCH_GBE_EX_LIST_EN	0x00000008 /* External List Enable */
#define PCH_GBE_RX_TCPIPACC_OFF	0x00000004 /* RX TCP/IP ACC Disabled */
#define PCH_GBE_TX_TCPIPACC_EN	0x00000002 /* TX TCP/IP ACC Enable */
#define PCH_GBE_RX_TCPIPACC_EN	0x00000001 /* RX TCP/IP ACC Enable */

/* MAC RX Enable */
#define PCH_GBE_MRE_MAC_RX_EN	0x00000001 /* MAC Receive Enable */

/* RX Flow Control */
#define PCH_GBE_FL_CTRL_EN	0x80000000 /* Pause packet is enabled */

/* Pause Packet Request */
#define PCH_GBE_PS_PKT_RQ	0x80000000 /* Pause packet Request */

/* RX Mode */
#define PCH_GBE_ADD_FIL_EN	0x80000000 /* Address Filtering Enable */
/* Multicast Filtering Enable */
#define PCH_GBE_MLT_FIL_EN	0x40000000
/* Receive Almost Empty Threshold */
#define PCH_GBE_RH_ALM_EMP_4	0x00000000 /* 4 words */
#define PCH_GBE_RH_ALM_EMP_8	0x00004000 /* 8 words */
#define PCH_GBE_RH_ALM_EMP_16	0x00008000 /* 16 words */
#define PCH_GBE_RH_ALM_EMP_32	0x0000C000 /* 32 words */
/* Receive Almost Full Threshold */
#define PCH_GBE_RH_ALM_FULL_4	0x00000000 /* 4 words */
#define PCH_GBE_RH_ALM_FULL_8	0x00001000 /* 8 words */
#define PCH_GBE_RH_ALM_FULL_16	0x00002000 /* 16 words */
#define PCH_GBE_RH_ALM_FULL_32	0x00003000 /* 32 words */
/* RX FIFO Read Trigger Threshold */
#define PCH_GBE_RH_RD_TRG_4	0x00000000 /* 4 words */
#define PCH_GBE_RH_RD_TRG_8	0x00000200 /* 8 words */
#define PCH_GBE_RH_RD_TRG_16	0x00000400 /* 16 words */
#define PCH_GBE_RH_RD_TRG_32	0x00000600 /* 32 words */
#define PCH_GBE_RH_RD_TRG_64	0x00000800 /* 64 words */
#define PCH_GBE_RH_RD_TRG_128	0x00000A00 /* 128 words */
#define PCH_GBE_RH_RD_TRG_256	0x00000C00 /* 256 words */
#define PCH_GBE_RH_RD_TRG_512	0x00000E00 /* 512 words */

/* Receive Descriptor bit definitions */
#define PCH_GBE_RXD_ACC_STAT_BCAST	0x00000400
#define PCH_GBE_RXD_ACC_STAT_MCAST	0x00000200
#define PCH_GBE_RXD_ACC_STAT_UCAST	0x00000100
#define PCH_GBE_RXD_ACC_STAT_TCPIPOK	0x000000C0
#define PCH_GBE_RXD_ACC_STAT_IPOK	0x00000080
#define PCH_GBE_RXD_ACC_STAT_TCPOK	0x00000040
#define PCH_GBE_RXD_ACC_STAT_IP6ERR	0x00000020
#define PCH_GBE_RXD_ACC_STAT_OFLIST	0x00000010
#define PCH_GBE_RXD_ACC_STAT_TYPEIP	0x00000008
#define PCH_GBE_RXD_ACC_STAT_MACL	0x00000004
#define PCH_GBE_RXD_ACC_STAT_PPPOE	0x00000002
#define PCH_GBE_RXD_ACC_STAT_VTAGT	0x00000001
#define PCH_GBE_RXD_GMAC_STAT_PAUSE	0x0200
#define PCH_GBE_RXD_GMAC_STAT_MARBR	0x0100
#define PCH_GBE_RXD_GMAC_STAT_MARMLT	0x0080
#define PCH_GBE_RXD_GMAC_STAT_MARIND	0x0040
#define PCH_GBE_RXD_GMAC_STAT_MARNOTMT	0x0020
#define PCH_GBE_RXD_GMAC_STAT_TLONG	0x0010
#define PCH_GBE_RXD_GMAC_STAT_TSHRT	0x0008
#define PCH_GBE_RXD_GMAC_STAT_NOTOCTAL	0x0004
#define PCH_GBE_RXD_GMAC_STAT_NBLERR	0x0002
#define PCH_GBE_RXD_GMAC_STAT_CRCERR	0x0001

/* Transmit Descriptor bit definitions */
#define PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF	0x0008
#define PCH_GBE_TXD_CTRL_ITAG		0x0004
#define PCH_GBE_TXD_CTRL_ICRC		0x0002
#define PCH_GBE_TXD_CTRL_APAD		0x0001
#define PCH_GBE_TXD_WORDS_SHIFT		2
#define PCH_GBE_TXD_GMAC_STAT_CMPLT	0x2000
#define PCH_GBE_TXD_GMAC_STAT_ABT	0x1000
#define PCH_GBE_TXD_GMAC_STAT_EXCOL	0x0800
#define PCH_GBE_TXD_GMAC_STAT_SNGCOL	0x0400
#define PCH_GBE_TXD_GMAC_STAT_MLTCOL	0x0200
#define PCH_GBE_TXD_GMAC_STAT_CRSER	0x0100
#define PCH_GBE_TXD_GMAC_STAT_TLNG	0x0080
#define PCH_GBE_TXD_GMAC_STAT_TSHRT	0x0040
#define PCH_GBE_TXD_GMAC_STAT_LTCOL	0x0020
#define PCH_GBE_TXD_GMAC_STAT_TFUNDFLW	0x0010
#define PCH_GBE_TXD_GMAC_STAT_RTYCNT_MASK	0x000F

/* TX Mode */
#define PCH_GBE_TM_NO_RTRY	0x80000000 /* No Retransmission */
#define PCH_GBE_TM_LONG_PKT	0x40000000 /* Long Packet TX Enable */
#define PCH_GBE_TM_ST_AND_FD	0x20000000 /* Store and Forward */
#define PCH_GBE_TM_SHORT_PKT	0x10000000 /* Short Packet TX Enable */
#define PCH_GBE_TM_LTCOL_RETX	0x08000000 /* Retransmission at Late Collision */
/* Frame Start Threshold */
#define PCH_GBE_TM_TH_TX_STRT_4		0x00000000 /* 4 words */
#define PCH_GBE_TM_TH_TX_STRT_8		0x00004000 /* 8 words */
#define PCH_GBE_TM_TH_TX_STRT_16	0x00008000 /* 16 words */
#define PCH_GBE_TM_TH_TX_STRT_32	0x0000C000 /* 32 words */
/* Transmit Almost Empty Threshold */
#define PCH_GBE_TM_TH_ALM_EMP_4		0x00000000 /* 4 words */
#define PCH_GBE_TM_TH_ALM_EMP_8		0x00000800 /* 8 words */
#define PCH_GBE_TM_TH_ALM_EMP_16	0x00001000 /* 16 words */
#define PCH_GBE_TM_TH_ALM_EMP_32	0x00001800 /* 32 words */
#define PCH_GBE_TM_TH_ALM_EMP_64	0x00002000 /* 64 words */
#define PCH_GBE_TM_TH_ALM_EMP_128	0x00002800 /* 128 words */
#define PCH_GBE_TM_TH_ALM_EMP_256	0x00003000 /* 256 words */
#define PCH_GBE_TM_TH_ALM_EMP_512	0x00003800 /* 512 words */
/* Transmit Almost Full Threshold */
#define PCH_GBE_TM_TH_ALM_FULL_4	0x00000000 /* 4 words */
#define PCH_GBE_TM_TH_ALM_FULL_8	0x00000200 /* 8 words */
#define PCH_GBE_TM_TH_ALM_FULL_16	0x00000400 /* 16 words */
#define PCH_GBE_TM_TH_ALM_FULL_32	0x00000600 /* 32 words */

/* RX FIFO Status */
#define PCH_GBE_RF_ALM_FULL	0x80000000 /* RX FIFO is almost full. */
#define PCH_GBE_RF_ALM_EMP	0x40000000 /* RX FIFO is almost empty. */
#define PCH_GBE_RF_RD_TRG	0x20000000 /* Become more than RH_RD_TRG. */
#define PCH_GBE_RF_STRWD	0x1FFE0000 /* The word count of RX FIFO. */
#define PCH_GBE_RF_RCVING	0x00010000 /* Stored in RX FIFO. */

/* MAC Address Mask */
#define PCH_GBE_BUSY	0x80000000

/* MIIM */
#define PCH_GBE_MIIM_OPER_WRITE	0x04000000
#define PCH_GBE_MIIM_OPER_READ	0x00000000
#define PCH_GBE_MIIM_OPER_READY	0x04000000
#define PCH_GBE_MIIM_PHY_ADDR_SHIFT	21
#define PCH_GBE_MIIM_REG_ADDR_SHIFT	16

/* RGMII Status */
#define PCH_GBE_LINK_UP		0x80000008
#define PCH_GBE_RXC_SPEED_MSK	0x00000006
#define PCH_GBE_RXC_SPEED_2_5M	0x00000000 /* 2.5MHz */
#define PCH_GBE_RXC_SPEED_25M	0x00000002 /* 25MHz */
#define PCH_GBE_RXC_SPEED_125M	0x00000004 /* 125MHz */
#define PCH_GBE_DUPLEX_FULL	0x00000001

/* RGMII Control */
#define PCH_GBE_CRS_SEL			0x00000010
#define PCH_GBE_RGMII_RATE_125M		0x00000000
#define PCH_GBE_RGMII_RATE_25M		0x00000008
#define PCH_GBE_RGMII_RATE_2_5M		0x0000000C
#define PCH_GBE_RGMII_MODE_GMII		0x00000000
#define PCH_GBE_RGMII_MODE_RGMII	0x00000002
#define PCH_GBE_CHIP_TYPE_EXTERNAL	0x00000000
#define PCH_GBE_CHIP_TYPE_INTERNAL	0x00000001

/* DMA Control */
#define PCH_GBE_RX_DMA_EN	0x00000002 /* Enables Receive DMA */
#define PCH_GBE_TX_DMA_EN	0x00000001 /* Enables Transmission DMA */

/* Wake On LAN Status */
#define PCH_GBE_WLS_BR	0x00000008 /* Broadcast Address */
#define PCH_GBE_WLS_MLT	0x00000004 /* Multicast Address */

/* The Frame registered in Address Recognizer */
#define PCH_GBE_WLS_IND	0x00000002
#define PCH_GBE_WLS_MP	0x00000001 /* Magic packet Address */

/* Wake On LAN Control */
#define PCH_GBE_WLC_WOL_MODE	0x00010000
#define PCH_GBE_WLC_IGN_TLONG	0x00000100
#define PCH_GBE_WLC_IGN_TSHRT	0x00000080
#define PCH_GBE_WLC_IGN_OCTER	0x00000040
#define PCH_GBE_WLC_IGN_NBLER	0x00000020
#define PCH_GBE_WLC_IGN_CRCER	0x00000010
#define PCH_GBE_WLC_BR		0x00000008
#define PCH_GBE_WLC_MLT		0x00000004
#define PCH_GBE_WLC_IND		0x00000002
#define PCH_GBE_WLC_MP		0x00000001

/* Wake On LAN Address Mask */
#define PCH_GBE_WLA_BUSY	0x80000000



/* TX/RX descriptor defines */
#define PCH_GBE_MAX_TXD		4096
#define PCH_GBE_DEFAULT_TXD	256
#define PCH_GBE_MIN_TXD		8
#define PCH_GBE_MAX_RXD		4096
#define PCH_GBE_DEFAULT_RXD	256
#define PCH_GBE_MIN_RXD		8

/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define PCH_GBE_TX_DESC_MULTIPLE	8
#define PCH_GBE_RX_DESC_MULTIPLE	8

/* Read/Write operation is done through MII Management IF */
#define PCH_GBE_HAL_MIIM_READ	((u32)0x00000000)
#define PCH_GBE_HAL_MIIM_WRITE	((u32)0x04000000)

/* flow control values */
#define PCH_GBE_FC_NONE		0
#define PCH_GBE_FC_RX_PAUSE	1
#define PCH_GBE_FC_TX_PAUSE	2
#define PCH_GBE_FC_FULL		3
#define PCH_GBE_FC_DEFAULT	PCH_GBE_FC_FULL
326
327
struct pch_gbe_hw;
/**
 * struct pch_gbe_functions - HAL API function pointer table
 * @get_bus_info:	for pch_gbe_hal_get_bus_info
 * @init_hw:		for pch_gbe_hal_init_hw
 * @read_phy_reg:	for pch_gbe_hal_read_phy_reg
 * @write_phy_reg:	for pch_gbe_hal_write_phy_reg
 * @reset_phy:		for pch_gbe_hal_phy_hw_reset
 * @sw_reset_phy:	for pch_gbe_hal_phy_sw_reset
 * @power_up_phy:	for pch_gbe_hal_power_up_phy
 * @power_down_phy:	for pch_gbe_hal_power_down_phy
 * @read_mac_addr:	for pch_gbe_hal_read_mac_addr
 *
 * Each pch_gbe_hal_*() wrapper dispatches through the corresponding
 * member of this table (installed in pch_gbe_hw.func).
 */
struct pch_gbe_functions {
	void (*get_bus_info) (struct pch_gbe_hw *);
	s32 (*init_hw) (struct pch_gbe_hw *);
	s32 (*read_phy_reg) (struct pch_gbe_hw *, u32, u16 *);
	s32 (*write_phy_reg) (struct pch_gbe_hw *, u32, u16);
	void (*reset_phy) (struct pch_gbe_hw *);
	void (*sw_reset_phy) (struct pch_gbe_hw *);
	void (*power_up_phy) (struct pch_gbe_hw *hw);
	void (*power_down_phy) (struct pch_gbe_hw *hw);
	s32 (*read_mac_addr) (struct pch_gbe_hw *);
};
352
/**
 * struct pch_gbe_mac_info - MAC information
 * @addr:		Store the MAC address (6 octets)
 * @fc:			Mode of flow control
 * @fc_autoneg:		Auto negotiation enable for flow control setting
 * @tx_fc_enable:	Enable flag of Transmit flow control
 * @max_frame_size:	Max transmit frame size
 * @min_frame_size:	Min transmit frame size
 * @autoneg:		Auto negotiation enable
 * @link_speed:		Link speed
 * @link_duplex:	Link duplex
 */
struct pch_gbe_mac_info {
	u8 addr[6];
	u8 fc;
	u8 fc_autoneg;
	u8 tx_fc_enable;
	u32 max_frame_size;
	u32 min_frame_size;
	u8 autoneg;
	u16 link_speed;
	u16 link_duplex;
};
376
/**
 * struct pch_gbe_phy_info - PHY information
 * @addr:		PHY address
 * @id:			PHY's identifier
 * @revision:		PHY's revision
 * @reset_delay_us:	HW reset delay time [us]
 * @autoneg_advertised:	Autoneg advertised link modes
 */
struct pch_gbe_phy_info {
	u32 addr;
	u32 id;
	u32 revision;
	u32 reset_delay_us;
	u16 autoneg_advertised;
};
392
/**
 * struct pch_gbe_bus_info - Bus information
 * @type:	bus type (pch_gbe_bus_type_* code)
 * @speed:	bus speed (pch_gbe_bus_speed_* code)
 * @width:	bus width (pch_gbe_bus_width_* code)
 */
struct pch_gbe_bus_info {
	u8 type;
	u8 speed;
	u8 width;
};
403
/**
 * struct pch_gbe_hw - Hardware information
 * @back:	opaque back pointer (presumably the owning adapter —
 *		TODO(review): confirm against pch_gbe_main.c)
 * @reg:	mapped device register block
 * @miim_lock:	spinlock guarding MIIM (MII management) accesses —
 *		inferred from the name; verify usage in pch_gbe_mac.c
 * @func:	HAL function table (see struct pch_gbe_functions)
 * @mac:	MAC information
 * @phy:	PHY information
 * @bus:	bus information
 */
struct pch_gbe_hw {
	void *back;

	struct pch_gbe_regs __iomem *reg;
	spinlock_t miim_lock;

	const struct pch_gbe_functions *func;
	struct pch_gbe_mac_info mac;
	struct pch_gbe_phy_info phy;
	struct pch_gbe_bus_info bus;
};
420
/**
 * struct pch_gbe_rx_desc - Receive Descriptor
 * @buffer_addr:	RX Frame Buffer Address
 * @tcp_ip_status:	TCP/IP Accelerator Status
 * @rx_words_eob:	RX word count and Byte position
 * @gbec_status:	GMAC Status
 * @dma_status:		DMA Status
 * @reserved1:		Reserved
 * @reserved2:		Reserved
 *
 * NOTE(review): this layout appears to mirror the hardware DMA
 * descriptor — do not reorder or resize the fields.
 */
struct pch_gbe_rx_desc {
	u32 buffer_addr;
	u32 tcp_ip_status;
	u16 rx_words_eob;
	u16 gbec_status;
	u8 dma_status;
	u8 reserved1;
	u16 reserved2;
};
440
/**
 * struct pch_gbe_tx_desc - Transmit Descriptor
 * @buffer_addr:	TX Frame Buffer Address
 * @length:		Data buffer length
 * @reserved1:		Reserved
 * @tx_words_eob:	TX word count and Byte position
 * @tx_frame_ctrl:	TX Frame Control
 * @dma_status:		DMA Status
 * @reserved2:		Reserved
 * @gbec_status:	GMAC Status
 *
 * NOTE(review): this layout appears to mirror the hardware DMA
 * descriptor — do not reorder or resize the fields.
 */
struct pch_gbe_tx_desc {
	u32 buffer_addr;
	u16 length;
	u16 reserved1;
	u16 tx_words_eob;
	u16 tx_frame_ctrl;
	u8 dma_status;
	u8 reserved2;
	u16 gbec_status;
};
462
463
/**
 * struct pch_gbe_buffer - Buffer information
 * @skb:	pointer to a socket buffer
 * @dma:	DMA address
 * @time_stamp:	time stamp
 * @length:	data size
 * @mapped:	true while @dma holds a live DMA mapping (assumed from the
 *		name — verify against the map/unmap paths in pch_gbe_main.c)
 */
struct pch_gbe_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned long time_stamp;
	u16 length;
	bool mapped;
};
478
/**
 * struct pch_gbe_tx_ring - tx ring information
 * @tx_lock:	spinlock protecting the ring
 * @desc:	pointer to the descriptor ring memory
 * @dma:	physical address of the descriptor ring
 * @size:	length of descriptor ring in bytes
 * @count:	number of descriptors in the ring
 * @next_to_use:	next descriptor to associate a buffer with
 * @next_to_clean:	next descriptor to check for DD status bit
 * @buffer_info:	array of buffer information structs
 */
struct pch_gbe_tx_ring {
	spinlock_t tx_lock;
	struct pch_gbe_tx_desc *desc;
	dma_addr_t dma;
	unsigned int size;
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
	struct pch_gbe_buffer *buffer_info;
};
500
/**
 * struct pch_gbe_rx_ring - rx ring information
 * @desc:	pointer to the descriptor ring memory
 * @dma:	physical address of the descriptor ring
 * @size:	length of descriptor ring in bytes
 * @count:	number of descriptors in the ring
 * @next_to_use:	next descriptor to associate a buffer with
 * @next_to_clean:	next descriptor to check for DD status bit
 * @buffer_info:	array of buffer information structs
 */
struct pch_gbe_rx_ring {
	struct pch_gbe_rx_desc *desc;
	dma_addr_t dma;
	unsigned int size;
	unsigned int count;
	unsigned int next_to_use;
	unsigned int next_to_clean;
	struct pch_gbe_buffer *buffer_info;
};
520
/**
 * struct pch_gbe_hw_stats - Statistics counters collected by the MAC
 * @rx_packets:		total packets received
 * @tx_packets:		total packets transmitted
 * @rx_bytes:		total bytes received
 * @tx_bytes:		total bytes transmitted
 * @rx_errors:		bad packets received
 * @tx_errors:		packet transmit problems
 * @rx_dropped:		no space in Linux buffers
 * @tx_dropped:		no space available in Linux
 * @multicast:		multicast packets received
 * @collisions:		collisions
 * @rx_crc_errors:	received packet with crc error
 * @rx_frame_errors:	received frame alignment error
 * @rx_alloc_buff_failed: allocate failure of a receive buffer
 * @tx_length_errors:	transmit length error
 * @tx_aborted_errors:	transmit aborted error
 * @tx_carrier_errors:	transmit carrier error
 * @tx_timeout_count:	Number of transmit timeout
 * @tx_restart_count:	Number of transmit restart
 * @intr_rx_dsc_empty_count:	Interrupt count of receive descriptor empty
 * @intr_rx_frame_err_count:	Interrupt count of receive frame error
 * @intr_rx_fifo_err_count:	Interrupt count of receive FIFO error
 * @intr_rx_dma_err_count:	Interrupt count of receive DMA error
 * @intr_tx_fifo_err_count:	Interrupt count of transmit FIFO error
 * @intr_tx_dma_err_count:	Interrupt count of transmit DMA error
 * @intr_tcpip_err_count:	Interrupt count of TCP/IP Accelerator
 */
struct pch_gbe_hw_stats {
	u32 rx_packets;
	u32 tx_packets;
	u32 rx_bytes;
	u32 tx_bytes;
	u32 rx_errors;
	u32 tx_errors;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 multicast;
	u32 collisions;
	u32 rx_crc_errors;
	u32 rx_frame_errors;
	u32 rx_alloc_buff_failed;
	u32 tx_length_errors;
	u32 tx_aborted_errors;
	u32 tx_carrier_errors;
	u32 tx_timeout_count;
	u32 tx_restart_count;
	u32 intr_rx_dsc_empty_count;
	u32 intr_rx_frame_err_count;
	u32 intr_rx_fifo_err_count;
	u32 intr_rx_dma_err_count;
	u32 intr_tx_fifo_err_count;
	u32 intr_tx_dma_err_count;
	u32 intr_tcpip_err_count;
};
576
/**
 * struct pch_gbe_adapter - board specific private data structure
 * @stats_lock:		Spinlock structure for status
 * @tx_queue_lock:	Spinlock structure for transmit
 * @ethtool_lock:	Spinlock structure for ethtool
 * @irq_sem:		Semaphore for interrupt
 * @netdev:		Pointer of network device structure
 * @pdev:		Pointer of pci device structure
 * @polling_netdev:	Pointer of polling network device structure
 * @napi:		NAPI structure
 * @hw:			Hardware structure (registers, MAC/PHY/bus info)
 * @stats:		Hardware statistics counters
 * @reset_task:		Reset task
 * @mii:		MII information structure
 * @watchdog_timer:	Watchdog timer list
 * @wake_up_evt:	Wake up event mask (PCH_GBE_WLC_* bits)
 * @config_space:	Saved PCI configuration space
 * @msg_enable:		Driver message level
 * @led_status:		LED status
 * @tx_ring:		Pointer of Tx descriptor ring structure
 * @rx_ring:		Pointer of Rx descriptor ring structure
 * @rx_buffer_len:	Receive buffer length
 * @tx_queue_len:	Transmit queue length
 * @rx_csum:		Receive TCP/IP checksum enable/disable
 * @tx_csum:		Transmit TCP/IP checksum enable/disable
 * @have_msi:		PCI MSI mode flag
 */

struct pch_gbe_adapter {
	spinlock_t stats_lock;
	spinlock_t tx_queue_lock;
	spinlock_t ethtool_lock;
	atomic_t irq_sem;
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct net_device *polling_netdev;
	struct napi_struct napi;
	struct pch_gbe_hw hw;
	struct pch_gbe_hw_stats stats;
	struct work_struct reset_task;
	struct mii_if_info mii;
	struct timer_list watchdog_timer;
	u32 wake_up_evt;
	u32 *config_space;
	unsigned long led_status;
	struct pch_gbe_tx_ring *tx_ring;
	struct pch_gbe_rx_ring *rx_ring;
	unsigned long rx_buffer_len;
	unsigned long tx_queue_len;
	bool rx_csum;
	bool tx_csum;
	bool have_msi;
};
630
/* Interfaces shared between the driver's translation units. */
extern const char pch_driver_version[];

/* pch_gbe_main.c */
extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
				       struct pch_gbe_tx_ring *txdr);
extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
				       struct pch_gbe_rx_ring *rxdr);
extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
				       struct pch_gbe_tx_ring *tx_ring);
extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
				       struct pch_gbe_rx_ring *rx_ring);
extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);

/* pch_gbe_param.c */
extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);

/* pch_gbe_ethtool.c */
extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);

/* pch_gbe_mac.c */
extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
				  u32 addr, u32 dir, u32 reg, u16 data);
#endif /* _PCH_GBE_H_ */
diff --git a/drivers/net/pch_gbe/pch_gbe_api.c b/drivers/net/pch_gbe/pch_gbe_api.c
new file mode 100644
index 000000000000..e48f084ad226
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_api.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#include "pch_gbe.h"
21#include "pch_gbe_phy.h"
22
/*
 * Bus information codes stored in struct pch_gbe_bus_info by
 * pch_gbe_plat_get_bus_info().
 */
/* bus type values */
#define pch_gbe_bus_type_unknown	0
#define pch_gbe_bus_type_pci		1
#define pch_gbe_bus_type_pcix		2
#define pch_gbe_bus_type_pci_express	3
#define pch_gbe_bus_type_reserved	4

/* bus speed values */
#define pch_gbe_bus_speed_unknown	0
#define pch_gbe_bus_speed_33		1
#define pch_gbe_bus_speed_66		2
#define pch_gbe_bus_speed_100		3
#define pch_gbe_bus_speed_120		4
#define pch_gbe_bus_speed_133		5
#define pch_gbe_bus_speed_2500		6
#define pch_gbe_bus_speed_reserved	7

/* bus width values */
#define pch_gbe_bus_width_unknown	0
#define pch_gbe_bus_width_pcie_x1	1
#define pch_gbe_bus_width_pcie_x2	2
#define pch_gbe_bus_width_pcie_x4	4
#define pch_gbe_bus_width_32		5
#define pch_gbe_bus_width_64		6
#define pch_gbe_bus_width_reserved	7
48
49/**
50 * pch_gbe_plat_get_bus_info - Obtain bus information for adapter
51 * @hw: Pointer to the HW structure
52 */
53static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
54{
55 hw->bus.type = pch_gbe_bus_type_pci_express;
56 hw->bus.speed = pch_gbe_bus_speed_2500;
57 hw->bus.width = pch_gbe_bus_width_pcie_x1;
58}
59
60/**
61 * pch_gbe_plat_init_hw - Initialize hardware
62 * @hw: Pointer to the HW structure
63 * Returns
64 * 0: Successfully
65 * Negative value: Failed-EBUSY
66 */
67static s32 pch_gbe_plat_init_hw(struct pch_gbe_hw *hw)
68{
69 s32 ret_val;
70
71 ret_val = pch_gbe_phy_get_id(hw);
72 if (ret_val) {
73 pr_err("pch_gbe_phy_get_id error\n");
74 return ret_val;
75 }
76 pch_gbe_phy_init_setting(hw);
77 /* Setup Mac interface option RGMII */
78#ifdef PCH_GBE_MAC_IFOP_RGMII
79 pch_gbe_phy_set_rgmii(hw);
80#endif
81 return ret_val;
82}
83
/* Default HAL operations; installed into hw->func by
 * pch_gbe_plat_init_function_pointers(). */
static const struct pch_gbe_functions pch_gbe_ops = {
	.get_bus_info = pch_gbe_plat_get_bus_info,
	.init_hw = pch_gbe_plat_init_hw,
	.read_phy_reg = pch_gbe_phy_read_reg_miic,
	.write_phy_reg = pch_gbe_phy_write_reg_miic,
	.reset_phy = pch_gbe_phy_hw_reset,
	.sw_reset_phy = pch_gbe_phy_sw_reset,
	.power_up_phy = pch_gbe_phy_power_up,
	.power_down_phy = pch_gbe_phy_power_down,
	.read_mac_addr = pch_gbe_mac_read_mac_addr
};
95
/**
 * pch_gbe_plat_init_function_pointers - Init func ptrs
 * @hw: Pointer to the HW structure
 *
 * Sets the PHY reset settle time and binds the default HAL
 * operation table (pch_gbe_ops) to @hw.
 */
static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
{
	/* Set PHY parameter */
	hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
	/* Set function pointers */
	hw->func = &pch_gbe_ops;
}
107
108/**
109 * pch_gbe_hal_setup_init_funcs - Initializes function pointers
110 * @hw: Pointer to the HW structure
111 * Returns
112 * 0: Successfully
113 * ENOSYS: Function is not registered
114 */
115inline s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw)
116{
117 if (!hw->reg) {
118 pr_err("ERROR: Registers not mapped\n");
119 return -ENOSYS;
120 }
121 pch_gbe_plat_init_function_pointers(hw);
122 return 0;
123}
124
125/**
126 * pch_gbe_hal_get_bus_info - Obtain bus information for adapter
127 * @hw: Pointer to the HW structure
128 */
129inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
130{
131 if (!hw->func->get_bus_info)
132 pr_err("ERROR: configuration\n");
133 else
134 hw->func->get_bus_info(hw);
135}
136
137/**
138 * pch_gbe_hal_init_hw - Initialize hardware
139 * @hw: Pointer to the HW structure
140 * Returns
141 * 0: Successfully
142 * ENOSYS: Function is not registered
143 */
144inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
145{
146 if (!hw->func->init_hw) {
147 pr_err("ERROR: configuration\n");
148 return -ENOSYS;
149 }
150 return hw->func->init_hw(hw);
151}
152
153/**
154 * pch_gbe_hal_read_phy_reg - Reads PHY register
155 * @hw: Pointer to the HW structure
156 * @offset: The register to read
157 * @data: The buffer to store the 16-bit read.
158 * Returns
159 * 0: Successfully
160 * Negative value: Failed
161 */
162inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
163 u16 *data)
164{
165 if (!hw->func->read_phy_reg)
166 return 0;
167 return hw->func->read_phy_reg(hw, offset, data);
168}
169
170/**
171 * pch_gbe_hal_write_phy_reg - Writes PHY register
172 * @hw: Pointer to the HW structure
173 * @offset: The register to read
174 * @data: The value to write.
175 * Returns
176 * 0: Successfully
177 * Negative value: Failed
178 */
179inline s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset,
180 u16 data)
181{
182 if (!hw->func->write_phy_reg)
183 return 0;
184 return hw->func->write_phy_reg(hw, offset, data);
185}
186
187/**
188 * pch_gbe_hal_phy_hw_reset - Hard PHY reset
189 * @hw: Pointer to the HW structure
190 */
191inline void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw)
192{
193 if (!hw->func->reset_phy)
194 pr_err("ERROR: configuration\n");
195 else
196 hw->func->reset_phy(hw);
197}
198
199/**
200 * pch_gbe_hal_phy_sw_reset - Soft PHY reset
201 * @hw: Pointer to the HW structure
202 */
203inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
204{
205 if (!hw->func->sw_reset_phy)
206 pr_err("ERROR: configuration\n");
207 else
208 hw->func->sw_reset_phy(hw);
209}
210
211/**
212 * pch_gbe_hal_read_mac_addr - Reads MAC address
213 * @hw: Pointer to the HW structure
214 * Returns
215 * 0: Successfully
216 * ENOSYS: Function is not registered
217 */
218inline s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw)
219{
220 if (!hw->func->read_mac_addr) {
221 pr_err("ERROR: configuration\n");
222 return -ENOSYS;
223 }
224 return hw->func->read_mac_addr(hw);
225}
226
227/**
228 * pch_gbe_hal_power_up_phy - Power up PHY
229 * @hw: Pointer to the HW structure
230 */
231inline void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw)
232{
233 if (hw->func->power_up_phy)
234 hw->func->power_up_phy(hw);
235}
236
237/**
238 * pch_gbe_hal_power_down_phy - Power down PHY
239 * @hw: Pointer to the HW structure
240 */
241inline void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw)
242{
243 if (hw->func->power_down_phy)
244 hw->func->power_down_phy(hw);
245}
diff --git a/drivers/net/pch_gbe/pch_gbe_api.h b/drivers/net/pch_gbe/pch_gbe_api.h
new file mode 100644
index 000000000000..94aaac5b057b
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_api.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
#ifndef _PCH_GBE_API_H_
#define _PCH_GBE_API_H_

#include "pch_gbe_phy.h"

/* HAL entry points implemented in pch_gbe_api.c; each dispatches
 * through the function table installed in hw->func by
 * pch_gbe_hal_setup_init_funcs(). */
s32 pch_gbe_hal_setup_init_funcs(struct pch_gbe_hw *hw);
void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw);
s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw);
s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 *data);
s32 pch_gbe_hal_write_phy_reg(struct pch_gbe_hw *hw, u32 offset, u16 data);
void pch_gbe_hal_phy_hw_reset(struct pch_gbe_hw *hw);
void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw);
s32 pch_gbe_hal_read_mac_addr(struct pch_gbe_hw *hw);
void pch_gbe_hal_power_up_phy(struct pch_gbe_hw *hw);
void pch_gbe_hal_power_down_phy(struct pch_gbe_hw *hw);

#endif
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
new file mode 100644
index 000000000000..c8cc32c0edc9
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -0,0 +1,585 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#include "pch_gbe.h"
21#include "pch_gbe_api.h"
22
/**
 * struct pch_gbe_stats - Stats item information
 * @string:	ethtool-visible name of the statistic
 * @size:	size in bytes of the pch_gbe_hw_stats member
 * @offset:	byte offset of the member within pch_gbe_hw_stats
 */
struct pch_gbe_stats {
	char string[ETH_GSTRING_LEN];
	size_t size;
	size_t offset;
};
31
/* Builds a pch_gbe_stats table entry mapping a pch_gbe_hw_stats
 * member to its name, size, and offset. */
#define PCH_GBE_STAT(m) \
{ \
	.string = #m, \
	.size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \
	.offset = offsetof(struct pch_gbe_hw_stats, m), \
}
38
/*
 * pch_gbe_gstrings_stats - ethtool statistics name table
 * (one entry per pch_gbe_hw_stats counter, in member order)
 */
static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
	PCH_GBE_STAT(rx_packets),
	PCH_GBE_STAT(tx_packets),
	PCH_GBE_STAT(rx_bytes),
	PCH_GBE_STAT(tx_bytes),
	PCH_GBE_STAT(rx_errors),
	PCH_GBE_STAT(tx_errors),
	PCH_GBE_STAT(rx_dropped),
	PCH_GBE_STAT(tx_dropped),
	PCH_GBE_STAT(multicast),
	PCH_GBE_STAT(collisions),
	PCH_GBE_STAT(rx_crc_errors),
	PCH_GBE_STAT(rx_frame_errors),
	PCH_GBE_STAT(rx_alloc_buff_failed),
	PCH_GBE_STAT(tx_length_errors),
	PCH_GBE_STAT(tx_aborted_errors),
	PCH_GBE_STAT(tx_carrier_errors),
	PCH_GBE_STAT(tx_timeout_count),
	PCH_GBE_STAT(tx_restart_count),
	PCH_GBE_STAT(intr_rx_dsc_empty_count),
	PCH_GBE_STAT(intr_rx_frame_err_count),
	PCH_GBE_STAT(intr_rx_fifo_err_count),
	PCH_GBE_STAT(intr_rx_dma_err_count),
	PCH_GBE_STAT(intr_tx_fifo_err_count),
	PCH_GBE_STAT(intr_tx_dma_err_count),
	PCH_GBE_STAT(intr_tcpip_err_count)
};
69
#define PCH_GBE_QUEUE_STATS_LEN 0
#define PCH_GBE_GLOBAL_STATS_LEN	ARRAY_SIZE(pch_gbe_gstrings_stats)
#define PCH_GBE_STATS_LEN (PCH_GBE_GLOBAL_STATS_LEN + PCH_GBE_QUEUE_STATS_LEN)

/* Register-dump length in 32-bit words: whole MAC register block
 * plus the PHY registers. */
#define PCH_GBE_MAC_REGS_LEN (sizeof(struct pch_gbe_regs) / 4)
#define PCH_GBE_REGS_LEN (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN)
76/**
77 * pch_gbe_get_settings - Get device-specific settings
78 * @netdev: Network interface device structure
79 * @ecmd: Ethtool command
80 * Returns
81 * 0: Successful.
82 * Negative value: Failed.
83 */
84static int pch_gbe_get_settings(struct net_device *netdev,
85 struct ethtool_cmd *ecmd)
86{
87 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
88 int ret;
89
90 ret = mii_ethtool_gset(&adapter->mii, ecmd);
91 ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
92 ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
93
94 if (!netif_carrier_ok(adapter->netdev))
95 ecmd->speed = -1;
96 return ret;
97}
98
/**
 * pch_gbe_set_settings - Set device-specific settings
 * @netdev: Network interface device structure
 * @ecmd: Ethtool command
 * Returns
 *	0:		Successful.
 *	Negative value:	Failed.
 */
static int pch_gbe_set_settings(struct net_device *netdev,
				struct ethtool_cmd *ecmd)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int ret;

	/* Reset the PHY before reprogramming it. */
	pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);

	/* USHRT_MAX is the sentinel pch_gbe_get_settings() reports while
	 * the link is down (u16 wrap of -1); map it to a concrete
	 * 1000/Full request before handing it to the MII layer. */
	if (ecmd->speed == USHRT_MAX) {
		ecmd->speed = SPEED_1000;
		ecmd->duplex = DUPLEX_FULL;
	}
	ret = mii_ethtool_sset(&adapter->mii, ecmd);
	if (ret) {
		pr_err("Error: mii_ethtool_sset\n");
		return ret;
	}
	/* Cache the requested link parameters in the HW structure. */
	hw->mac.link_speed = ecmd->speed;
	hw->mac.link_duplex = ecmd->duplex;
	hw->phy.autoneg_advertised = ecmd->advertising;
	hw->mac.autoneg = ecmd->autoneg;
	pch_gbe_hal_phy_sw_reset(hw);

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		pch_gbe_down(adapter);
		ret = pch_gbe_up(adapter);
	} else {
		pch_gbe_reset(adapter);
	}
	return ret;
}
140
141/**
142 * pch_gbe_get_regs_len - Report the size of device registers
143 * @netdev: Network interface device structure
144 * Returns: the size of device registers.
145 */
146static int pch_gbe_get_regs_len(struct net_device *netdev)
147{
148 return PCH_GBE_REGS_LEN * (int)sizeof(u32);
149}
150
151/**
152 * pch_gbe_get_drvinfo - Report driver information
153 * @netdev: Network interface device structure
154 * @drvinfo: Driver information structure
155 */
156static void pch_gbe_get_drvinfo(struct net_device *netdev,
157 struct ethtool_drvinfo *drvinfo)
158{
159 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
160
161 strcpy(drvinfo->driver, KBUILD_MODNAME);
162 strcpy(drvinfo->version, pch_driver_version);
163 strcpy(drvinfo->fw_version, "N/A");
164 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
165 drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
166}
167
/**
 * pch_gbe_get_regs - Get device registers
 * @netdev: Network interface device structure
 * @regs: Ethtool register structure
 * @p: Buffer pointer of read device register date
 */
static void pch_gbe_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 *regs_buff = p;
	u16 i, tmp;

	/* Version word packs a format tag with PCI revision and device ID. */
	regs->version = 0x1000000 | (__u32)pdev->revision << 16 | pdev->device;
	/* Dump PCH_GBE_MAC_REGS_LEN consecutive 32-bit MAC registers,
	 * starting at INT_ST (the first member of pch_gbe_regs). */
	for (i = 0; i < PCH_GBE_MAC_REGS_LEN; i++)
		*regs_buff++ = ioread32(&hw->reg->INT_ST + i);
	/* PHY register */
	for (i = 0; i < PCH_GBE_PHY_REGS_LEN; i++) {
		pch_gbe_hal_read_phy_reg(&adapter->hw, i, &tmp);
		*regs_buff++ = tmp;
	}
}
192
193/**
194 * pch_gbe_get_wol - Report whether Wake-on-Lan is enabled
195 * @netdev: Network interface device structure
196 * @wol: Wake-on-Lan information
197 */
198static void pch_gbe_get_wol(struct net_device *netdev,
199 struct ethtool_wolinfo *wol)
200{
201 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
202
203 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
204 wol->wolopts = 0;
205
206 if ((adapter->wake_up_evt & PCH_GBE_WLC_IND))
207 wol->wolopts |= WAKE_UCAST;
208 if ((adapter->wake_up_evt & PCH_GBE_WLC_MLT))
209 wol->wolopts |= WAKE_MCAST;
210 if ((adapter->wake_up_evt & PCH_GBE_WLC_BR))
211 wol->wolopts |= WAKE_BCAST;
212 if ((adapter->wake_up_evt & PCH_GBE_WLC_MP))
213 wol->wolopts |= WAKE_MAGIC;
214}
215
216/**
217 * pch_gbe_set_wol - Turn Wake-on-Lan on or off
218 * @netdev: Network interface device structure
219 * @wol: Pointer of wake-on-Lan information straucture
220 * Returns
221 * 0: Successful.
222 * Negative value: Failed.
223 */
224static int pch_gbe_set_wol(struct net_device *netdev,
225 struct ethtool_wolinfo *wol)
226{
227 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
228
229 if ((wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)))
230 return -EOPNOTSUPP;
231 /* these settings will always override what we currently have */
232 adapter->wake_up_evt = 0;
233
234 if ((wol->wolopts & WAKE_UCAST))
235 adapter->wake_up_evt |= PCH_GBE_WLC_IND;
236 if ((wol->wolopts & WAKE_MCAST))
237 adapter->wake_up_evt |= PCH_GBE_WLC_MLT;
238 if ((wol->wolopts & WAKE_BCAST))
239 adapter->wake_up_evt |= PCH_GBE_WLC_BR;
240 if ((wol->wolopts & WAKE_MAGIC))
241 adapter->wake_up_evt |= PCH_GBE_WLC_MP;
242 return 0;
243}
244
245/**
246 * pch_gbe_nway_reset - Restart autonegotiation
247 * @netdev: Network interface device structure
248 * Returns
249 * 0: Successful.
250 * Negative value: Failed.
251 */
252static int pch_gbe_nway_reset(struct net_device *netdev)
253{
254 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
255
256 return mii_nway_restart(&adapter->mii);
257}
258
259/**
260 * pch_gbe_get_ringparam - Report ring sizes
261 * @netdev: Network interface device structure
262 * @ring: Ring param structure
263 */
264static void pch_gbe_get_ringparam(struct net_device *netdev,
265 struct ethtool_ringparam *ring)
266{
267 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
268 struct pch_gbe_tx_ring *txdr = adapter->tx_ring;
269 struct pch_gbe_rx_ring *rxdr = adapter->rx_ring;
270
271 ring->rx_max_pending = PCH_GBE_MAX_RXD;
272 ring->tx_max_pending = PCH_GBE_MAX_TXD;
273 ring->rx_mini_max_pending = 0;
274 ring->rx_jumbo_max_pending = 0;
275 ring->rx_pending = rxdr->count;
276 ring->tx_pending = txdr->count;
277 ring->rx_mini_pending = 0;
278 ring->rx_jumbo_pending = 0;
279}
280
281/**
282 * pch_gbe_set_ringparam - Set ring sizes
283 * @netdev: Network interface device structure
284 * @ring: Ring param structure
285 * Returns
286 * 0: Successful.
287 * Negative value: Failed.
288 */
289static int pch_gbe_set_ringparam(struct net_device *netdev,
290 struct ethtool_ringparam *ring)
291{
292 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
293 struct pch_gbe_tx_ring *txdr, *tx_old;
294 struct pch_gbe_rx_ring *rxdr, *rx_old;
295 int tx_ring_size, rx_ring_size;
296 int err = 0;
297
298 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
299 return -EINVAL;
300 tx_ring_size = (int)sizeof(struct pch_gbe_tx_ring);
301 rx_ring_size = (int)sizeof(struct pch_gbe_rx_ring);
302
303 if ((netif_running(adapter->netdev)))
304 pch_gbe_down(adapter);
305 tx_old = adapter->tx_ring;
306 rx_old = adapter->rx_ring;
307
308 txdr = kzalloc(tx_ring_size, GFP_KERNEL);
309 if (!txdr) {
310 err = -ENOMEM;
311 goto err_alloc_tx;
312 }
313 rxdr = kzalloc(rx_ring_size, GFP_KERNEL);
314 if (!rxdr) {
315 err = -ENOMEM;
316 goto err_alloc_rx;
317 }
318 adapter->tx_ring = txdr;
319 adapter->rx_ring = rxdr;
320
321 rxdr->count =
322 clamp_val(ring->rx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
323 rxdr->count = roundup(rxdr->count, PCH_GBE_RX_DESC_MULTIPLE);
324
325 txdr->count =
326 clamp_val(ring->tx_pending, PCH_GBE_MIN_RXD, PCH_GBE_MAX_RXD);
327 txdr->count = roundup(txdr->count, PCH_GBE_TX_DESC_MULTIPLE);
328
329 if ((netif_running(adapter->netdev))) {
330 /* Try to get new resources before deleting old */
331 err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
332 if (err)
333 goto err_setup_rx;
334 err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
335 if (err)
336 goto err_setup_tx;
337 /* save the new, restore the old in order to free it,
338 * then restore the new back again */
339#ifdef RINGFREE
340 adapter->rx_ring = rx_old;
341 adapter->tx_ring = tx_old;
342 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
343 pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
344 kfree(tx_old);
345 kfree(rx_old);
346 adapter->rx_ring = rxdr;
347 adapter->tx_ring = txdr;
348#else
349 pch_gbe_free_rx_resources(adapter, rx_old);
350 pch_gbe_free_tx_resources(adapter, tx_old);
351 kfree(tx_old);
352 kfree(rx_old);
353 adapter->rx_ring = rxdr;
354 adapter->tx_ring = txdr;
355#endif
356 err = pch_gbe_up(adapter);
357 }
358 return err;
359
360err_setup_tx:
361 pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
362err_setup_rx:
363 adapter->rx_ring = rx_old;
364 adapter->tx_ring = tx_old;
365 kfree(rxdr);
366err_alloc_rx:
367 kfree(txdr);
368err_alloc_tx:
369 if (netif_running(adapter->netdev))
370 pch_gbe_up(adapter);
371 return err;
372}
373
374/**
375 * pch_gbe_get_pauseparam - Report pause parameters
376 * @netdev: Network interface device structure
377 * @pause: Pause parameters structure
378 */
379static void pch_gbe_get_pauseparam(struct net_device *netdev,
380 struct ethtool_pauseparam *pause)
381{
382 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
383 struct pch_gbe_hw *hw = &adapter->hw;
384
385 pause->autoneg =
386 ((hw->mac.fc_autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE);
387
388 if (hw->mac.fc == PCH_GBE_FC_RX_PAUSE) {
389 pause->rx_pause = 1;
390 } else if (hw->mac.fc == PCH_GBE_FC_TX_PAUSE) {
391 pause->tx_pause = 1;
392 } else if (hw->mac.fc == PCH_GBE_FC_FULL) {
393 pause->rx_pause = 1;
394 pause->tx_pause = 1;
395 }
396}
397
398/**
399 * pch_gbe_set_pauseparam - Set pause paramters
400 * @netdev: Network interface device structure
401 * @pause: Pause parameters structure
402 * Returns
403 * 0: Successful.
404 * Negative value: Failed.
405 */
406static int pch_gbe_set_pauseparam(struct net_device *netdev,
407 struct ethtool_pauseparam *pause)
408{
409 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
410 struct pch_gbe_hw *hw = &adapter->hw;
411 int ret = 0;
412
413 hw->mac.fc_autoneg = pause->autoneg;
414 if ((pause->rx_pause) && (pause->tx_pause))
415 hw->mac.fc = PCH_GBE_FC_FULL;
416 else if ((pause->rx_pause) && (!pause->tx_pause))
417 hw->mac.fc = PCH_GBE_FC_RX_PAUSE;
418 else if ((!pause->rx_pause) && (pause->tx_pause))
419 hw->mac.fc = PCH_GBE_FC_TX_PAUSE;
420 else if ((!pause->rx_pause) && (!pause->tx_pause))
421 hw->mac.fc = PCH_GBE_FC_NONE;
422
423 if (hw->mac.fc_autoneg == AUTONEG_ENABLE) {
424 if ((netif_running(adapter->netdev))) {
425 pch_gbe_down(adapter);
426 ret = pch_gbe_up(adapter);
427 } else {
428 pch_gbe_reset(adapter);
429 }
430 } else {
431 ret = pch_gbe_mac_force_mac_fc(hw);
432 }
433 return ret;
434}
435
436/**
437 * pch_gbe_get_rx_csum - Report whether receive checksums are turned on or off
438 * @netdev: Network interface device structure
439 * Returns
440 * true(1): Checksum On
441 * false(0): Checksum Off
442 */
443static u32 pch_gbe_get_rx_csum(struct net_device *netdev)
444{
445 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
446
447 return adapter->rx_csum;
448}
449
450/**
451 * pch_gbe_set_rx_csum - Turn receive checksum on or off
452 * @netdev: Network interface device structure
453 * @data: Checksum On[true] or Off[false]
454 * Returns
455 * 0: Successful.
456 * Negative value: Failed.
457 */
458static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
459{
460 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
461
462 adapter->rx_csum = data;
463 if ((netif_running(netdev)))
464 pch_gbe_reinit_locked(adapter);
465 else
466 pch_gbe_reset(adapter);
467
468 return 0;
469}
470
471/**
472 * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
473 * @netdev: Network interface device structure
474 * Returns
475 * true(1): Checksum On
476 * false(0): Checksum Off
477 */
478static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
479{
480 return (netdev->features & NETIF_F_HW_CSUM) != 0;
481}
482
483/**
484 * pch_gbe_set_tx_csum - Turn transmit checksums on or off
485 * @netdev: Network interface device structure
486 * @data: Checksum on[true] or off[false]
487 * Returns
488 * 0: Successful.
489 * Negative value: Failed.
490 */
491static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
492{
493 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
494
495 adapter->tx_csum = data;
496 if (data)
497 netdev->features |= NETIF_F_HW_CSUM;
498 else
499 netdev->features &= ~NETIF_F_HW_CSUM;
500 return 0;
501}
502
503/**
504 * pch_gbe_get_strings - Return a set of strings that describe the requested
505 * objects
506 * @netdev: Network interface device structure
507 * @stringset: Select the stringset. [ETH_SS_TEST] [ETH_SS_STATS]
508 * @data: Pointer of read string data.
509 */
510static void pch_gbe_get_strings(struct net_device *netdev, u32 stringset,
511 u8 *data)
512{
513 u8 *p = data;
514 int i;
515
516 switch (stringset) {
517 case (u32) ETH_SS_STATS:
518 for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
519 memcpy(p, pch_gbe_gstrings_stats[i].string,
520 ETH_GSTRING_LEN);
521 p += ETH_GSTRING_LEN;
522 }
523 break;
524 }
525}
526
527/**
528 * pch_gbe_get_ethtool_stats - Return statistics about the device
529 * @netdev: Network interface device structure
530 * @stats: Ethtool statue structure
531 * @data: Pointer of read status area
532 */
533static void pch_gbe_get_ethtool_stats(struct net_device *netdev,
534 struct ethtool_stats *stats, u64 *data)
535{
536 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
537 int i;
538 const struct pch_gbe_stats *gstats = pch_gbe_gstrings_stats;
539 char *hw_stats = (char *)&adapter->stats;
540
541 pch_gbe_update_stats(adapter);
542 for (i = 0; i < PCH_GBE_GLOBAL_STATS_LEN; i++) {
543 char *p = hw_stats + gstats->offset;
544 data[i] = gstats->size == sizeof(u64) ? *(u64 *)p:(*(u32 *)p);
545 gstats++;
546 }
547}
548
549static int pch_gbe_get_sset_count(struct net_device *netdev, int sset)
550{
551 switch (sset) {
552 case ETH_SS_STATS:
553 return PCH_GBE_STATS_LEN;
554 default:
555 return -EOPNOTSUPP;
556 }
557}
558
/* ethtool entry points; attached to the netdev by pch_gbe_set_ethtool_ops(). */
static const struct ethtool_ops pch_gbe_ethtool_ops = {
	.get_settings = pch_gbe_get_settings,
	.set_settings = pch_gbe_set_settings,
	.get_drvinfo = pch_gbe_get_drvinfo,
	.get_regs_len = pch_gbe_get_regs_len,
	.get_regs = pch_gbe_get_regs,
	.get_wol = pch_gbe_get_wol,
	.set_wol = pch_gbe_set_wol,
	.nway_reset = pch_gbe_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ringparam = pch_gbe_get_ringparam,
	.set_ringparam = pch_gbe_set_ringparam,
	.get_pauseparam = pch_gbe_get_pauseparam,
	.set_pauseparam = pch_gbe_set_pauseparam,
	.get_rx_csum = pch_gbe_get_rx_csum,
	.set_rx_csum = pch_gbe_set_rx_csum,
	.get_tx_csum = pch_gbe_get_tx_csum,
	.set_tx_csum = pch_gbe_set_tx_csum,
	.get_strings = pch_gbe_get_strings,
	.get_ethtool_stats = pch_gbe_get_ethtool_stats,
	.get_sset_count = pch_gbe_get_sset_count,
};
581
/* Attach the pch_gbe ethtool operations to a newly created net_device. */
void pch_gbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
}
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
new file mode 100644
index 000000000000..472056b47440
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -0,0 +1,2477 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_api.h"
23
24#define DRV_VERSION "1.00"
25const char pch_driver_version[] = DRV_VERSION;
26
27#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
28#define PCH_GBE_MAR_ENTRIES 16
29#define PCH_GBE_SHORT_PKT 64
30#define DSC_INIT16 0xC000
31#define PCH_GBE_DMA_ALIGN 0
32#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
33#define PCH_GBE_COPYBREAK_DEFAULT 256
34#define PCH_GBE_PCI_BAR 1
35
36#define PCH_GBE_TX_WEIGHT 64
37#define PCH_GBE_RX_WEIGHT 64
38#define PCH_GBE_RX_BUFFER_WRITE 16
39
40/* Initialize the wake-on-LAN settings */
41#define PCH_GBE_WL_INIT_SETTING (PCH_GBE_WLC_MP)
42
43#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
44 PCH_GBE_CHIP_TYPE_INTERNAL | \
45 PCH_GBE_RGMII_MODE_RGMII | \
46 PCH_GBE_CRS_SEL \
47 )
48
49/* Ethertype field values */
50#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
51#define PCH_GBE_FRAME_SIZE_2048 2048
52#define PCH_GBE_FRAME_SIZE_4096 4096
53#define PCH_GBE_FRAME_SIZE_8192 8192
54
55#define PCH_GBE_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
56#define PCH_GBE_RX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
57#define PCH_GBE_TX_DESC(R, i) PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
58#define PCH_GBE_DESC_UNUSED(R) \
59 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
60 (R)->next_to_clean - (R)->next_to_use - 1)
61
62/* Pause packet value */
63#define PCH_GBE_PAUSE_PKT1_VALUE 0x00C28001
64#define PCH_GBE_PAUSE_PKT2_VALUE 0x00000100
65#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
66#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
67
68#define PCH_GBE_ETH_ALEN 6
69
70/* This defines the bits that are set in the Interrupt Mask
71 * Set/Read Register. Each bit is documented below:
72 * o RXT0 = Receiver Timer Interrupt (ring 0)
73 * o TXDW = Transmit Descriptor Written Back
74 * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
75 * o RXSEQ = Receive Sequence Error
76 * o LSC = Link Status Change
77 */
78#define PCH_GBE_INT_ENABLE_MASK ( \
79 PCH_GBE_INT_RX_DMA_CMPLT | \
80 PCH_GBE_INT_RX_DSC_EMP | \
81 PCH_GBE_INT_WOL_DET | \
82 PCH_GBE_INT_TX_CMPLT \
83 )
84
85
86static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
87
88static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
89static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
90 int data);
91/**
92 * pch_gbe_mac_read_mac_addr - Read MAC address
93 * @hw: Pointer to the HW structure
94 * Returns
95 * 0: Successful.
96 */
97s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
98{
99 u32 adr1a, adr1b;
100
101 adr1a = ioread32(&hw->reg->mac_adr[0].high);
102 adr1b = ioread32(&hw->reg->mac_adr[0].low);
103
104 hw->mac.addr[0] = (u8)(adr1a & 0xFF);
105 hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
106 hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
107 hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
108 hw->mac.addr[4] = (u8)(adr1b & 0xFF);
109 hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
110
111 pr_debug("hw->mac.addr : %pM\n", hw->mac.addr);
112 return 0;
113}
114
115/**
116 * pch_gbe_wait_clr_bit - Wait to clear a bit
117 * @reg: Pointer of register
118 * @busy: Busy bit
119 */
120static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
121{
122 u32 tmp;
123 /* wait busy */
124 tmp = 1000;
125 while ((ioread32(reg) & bit) && --tmp)
126 cpu_relax();
127 if (!tmp)
128 pr_err("Error: busy bit is not cleared\n");
129}
130/**
131 * pch_gbe_mac_mar_set - Set MAC address register
132 * @hw: Pointer to the HW structure
133 * @addr: Pointer to the MAC address
134 * @index: MAC address array register
135 */
136static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 * addr, u32 index)
137{
138 u32 mar_low, mar_high, adrmask;
139
140 pr_debug("index : 0x%x\n", index);
141
142 /*
143 * HW expects these in little endian so we reverse the byte order
144 * from network order (big endian) to little endian
145 */
146 mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
147 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
148 mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
149 /* Stop the MAC Address of index. */
150 adrmask = ioread32(&hw->reg->ADDR_MASK);
151 iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
152 /* wait busy */
153 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
154 /* Set the MAC address to the MAC address 1A/1B register */
155 iowrite32(mar_high, &hw->reg->mac_adr[index].high);
156 iowrite32(mar_low, &hw->reg->mac_adr[index].low);
157 /* Start the MAC address of index */
158 iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
159 /* wait busy */
160 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
161}
162
163/**
164 * pch_gbe_mac_reset_hw - Reset hardware
165 * @hw: Pointer to the HW structure
166 */
167static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
168{
169 /* Read the MAC address. and store to the private data */
170 pch_gbe_mac_read_mac_addr(hw);
171 iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
172#ifdef PCH_GBE_MAC_IFOP_RGMII
173 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
174#endif
175 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
176 /* Setup the receive address */
177 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
178 return;
179}
180
181/**
182 * pch_gbe_mac_init_rx_addrs - Initialize receive address's
183 * @hw: Pointer to the HW structure
184 * @mar_count: Receive address registers
185 */
186static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
187{
188 u32 i;
189
190 /* Setup the receive address */
191 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
192
193 /* Zero out the other receive addresses */
194 for (i = 1; i < mar_count; i++) {
195 iowrite32(0, &hw->reg->mac_adr[i].high);
196 iowrite32(0, &hw->reg->mac_adr[i].low);
197 }
198 iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
199 /* wait busy */
200 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
201}
202
203
204/**
205 * pch_gbe_mac_mc_addr_list_update - Update Multicast addresses
206 * @hw: Pointer to the HW structure
207 * @mc_addr_list: Array of multicast addresses to program
208 * @mc_addr_count: Number of multicast addresses to program
209 * @mar_used_count: The first MAC Address register free to program
210 * @mar_total_num: Total number of supported MAC Address Registers
211 */
212static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
213 u8 *mc_addr_list, u32 mc_addr_count,
214 u32 mar_used_count, u32 mar_total_num)
215{
216 u32 i, adrmask;
217
218 /* Load the first set of multicast addresses into the exact
219 * filters (RAR). If there are not enough to fill the RAR
220 * array, clear the filters.
221 */
222 for (i = mar_used_count; i < mar_total_num; i++) {
223 if (mc_addr_count) {
224 pch_gbe_mac_mar_set(hw, mc_addr_list, i);
225 mc_addr_count--;
226 mc_addr_list += PCH_GBE_ETH_ALEN;
227 } else {
228 /* Clear MAC address mask */
229 adrmask = ioread32(&hw->reg->ADDR_MASK);
230 iowrite32((adrmask | (0x0001 << i)),
231 &hw->reg->ADDR_MASK);
232 /* wait busy */
233 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
234 /* Clear MAC address */
235 iowrite32(0, &hw->reg->mac_adr[i].high);
236 iowrite32(0, &hw->reg->mac_adr[i].low);
237 }
238 }
239}
240
241/**
242 * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
243 * @hw: Pointer to the HW structure
244 * Returns
245 * 0: Successful.
246 * Negative value: Failed.
247 */
248s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
249{
250 struct pch_gbe_mac_info *mac = &hw->mac;
251 u32 rx_fctrl;
252
253 pr_debug("mac->fc = %u\n", mac->fc);
254
255 rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
256
257 switch (mac->fc) {
258 case PCH_GBE_FC_NONE:
259 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
260 mac->tx_fc_enable = false;
261 break;
262 case PCH_GBE_FC_RX_PAUSE:
263 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
264 mac->tx_fc_enable = false;
265 break;
266 case PCH_GBE_FC_TX_PAUSE:
267 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
268 mac->tx_fc_enable = true;
269 break;
270 case PCH_GBE_FC_FULL:
271 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
272 mac->tx_fc_enable = true;
273 break;
274 default:
275 pr_err("Flow control param set incorrectly\n");
276 return -EINVAL;
277 }
278 if (mac->link_duplex == DUPLEX_HALF)
279 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
280 iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
281 pr_debug("RX_FCTRL reg : 0x%08x mac->tx_fc_enable : %d\n",
282 ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
283 return 0;
284}
285
286/**
287 * pch_gbe_mac_set_wol_event - Set wake-on-lan event
288 * @hw: Pointer to the HW structure
289 * @wu_evt: Wake up event
290 */
291static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
292{
293 u32 addr_mask;
294
295 pr_debug("wu_evt : 0x%08x ADDR_MASK reg : 0x%08x\n",
296 wu_evt, ioread32(&hw->reg->ADDR_MASK));
297
298 if (wu_evt) {
299 /* Set Wake-On-Lan address mask */
300 addr_mask = ioread32(&hw->reg->ADDR_MASK);
301 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
302 /* wait busy */
303 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
304 iowrite32(0, &hw->reg->WOL_ST);
305 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
306 iowrite32(0x02, &hw->reg->TCPIP_ACC);
307 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
308 } else {
309 iowrite32(0, &hw->reg->WOL_CTRL);
310 iowrite32(0, &hw->reg->WOL_ST);
311 }
312 return;
313}
314
/**
 * pch_gbe_mac_ctrl_miim - Control MIIM interface
 * @hw: Pointer to the HW structure
 * @addr: Address of PHY
 * @dir: Operation (PCH_GBE_MIIM_OPER_READ or write)
 * @reg: Access register of PHY
 * @data: Write data (ignored for reads)
 *
 * Returns: the data read from the PHY for a read operation, the written
 * data for a write; 0 on timeout (the caller cannot distinguish a
 * timeout from a legitimate zero read).
 */
u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
			u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/* Serialize access to the single MIIM register. */
	spin_lock_irqsave(&hw->miim_lock, flags);

	/* Wait (up to 100 polls, 20us apart) for any prior op to finish. */
	for (i = 100; i; --i) {
		if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
			break;
		udelay(20);
	}
	if (i == 0) {
		pr_err("pch-gbe.miim won't go Ready\n");
		spin_unlock_irqrestore(&hw->miim_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	/* Kick off the operation: register, PHY address, direction, data. */
	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
		(addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
		dir | data), &hw->reg->MIIM);
	/* Poll for completion; data_out holds the last MIIM read. */
	for (i = 0; i < 100; i++) {
		udelay(20);
		data_out = ioread32(&hw->reg->MIIM);
		if ((data_out & PCH_GBE_MIIM_OPER_READY))
			break;
	}
	spin_unlock_irqrestore(&hw->miim_lock, flags);

	pr_debug("PHY %s: reg=%d, data=0x%04X\n",
		dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
		dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
	return (u16) data_out;
}
360
361/**
362 * pch_gbe_mac_set_pause_packet - Set pause packet
363 * @hw: Pointer to the HW structure
364 */
365static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
366{
367 unsigned long tmp2, tmp3;
368
369 /* Set Pause packet */
370 tmp2 = hw->mac.addr[1];
371 tmp2 = (tmp2 << 8) | hw->mac.addr[0];
372 tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
373
374 tmp3 = hw->mac.addr[5];
375 tmp3 = (tmp3 << 8) | hw->mac.addr[4];
376 tmp3 = (tmp3 << 8) | hw->mac.addr[3];
377 tmp3 = (tmp3 << 8) | hw->mac.addr[2];
378
379 iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
380 iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
381 iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
382 iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
383 iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
384
385 /* Transmit Pause Packet */
386 iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
387
388 pr_debug("PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
389 ioread32(&hw->reg->PAUSE_PKT1), ioread32(&hw->reg->PAUSE_PKT2),
390 ioread32(&hw->reg->PAUSE_PKT3), ioread32(&hw->reg->PAUSE_PKT4),
391 ioread32(&hw->reg->PAUSE_PKT5));
392
393 return;
394}
395
396
397/**
398 * pch_gbe_alloc_queues - Allocate memory for all rings
399 * @adapter: Board private structure to initialize
400 * Returns
401 * 0: Successfully
402 * Negative value: Failed
403 */
404static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
405{
406 int size;
407
408 size = (int)sizeof(struct pch_gbe_tx_ring);
409 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
410 if (!adapter->tx_ring)
411 return -ENOMEM;
412 size = (int)sizeof(struct pch_gbe_rx_ring);
413 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
414 if (!adapter->rx_ring) {
415 kfree(adapter->tx_ring);
416 return -ENOMEM;
417 }
418 return 0;
419}
420
421/**
422 * pch_gbe_init_stats - Initialize status
423 * @adapter: Board private structure to initialize
424 */
425static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
426{
427 memset(&adapter->stats, 0, sizeof(adapter->stats));
428 return;
429}
430
431/**
432 * pch_gbe_init_phy - Initialize PHY
433 * @adapter: Board private structure to initialize
434 * Returns
435 * 0: Successfully
436 * Negative value: Failed
437 */
438static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
439{
440 struct net_device *netdev = adapter->netdev;
441 u32 addr;
442 u16 bmcr, stat;
443
444 /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
445 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
446 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
447 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
448 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
449 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
450 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
451 break;
452 }
453 adapter->hw.phy.addr = adapter->mii.phy_id;
454 pr_debug("phy_addr = %d\n", adapter->mii.phy_id);
455 if (addr == 32)
456 return -EAGAIN;
457 /* Selected the phy and isolate the rest */
458 for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
459 if (addr != adapter->mii.phy_id) {
460 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
461 BMCR_ISOLATE);
462 } else {
463 bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
464 pch_gbe_mdio_write(netdev, addr, MII_BMCR,
465 bmcr & ~BMCR_ISOLATE);
466 }
467 }
468
469 /* MII setup */
470 adapter->mii.phy_id_mask = 0x1F;
471 adapter->mii.reg_num_mask = 0x1F;
472 adapter->mii.dev = adapter->netdev;
473 adapter->mii.mdio_read = pch_gbe_mdio_read;
474 adapter->mii.mdio_write = pch_gbe_mdio_write;
475 adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
476 return 0;
477}
478
479/**
480 * pch_gbe_mdio_read - The read function for mii
481 * @netdev: Network interface device structure
482 * @addr: Phy ID
483 * @reg: Access location
484 * Returns
485 * 0: Successfully
486 * Negative value: Failed
487 */
488static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
489{
490 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
491 struct pch_gbe_hw *hw = &adapter->hw;
492
493 return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
494 (u16) 0);
495}
496
497/**
498 * pch_gbe_mdio_write - The write function for mii
499 * @netdev: Network interface device structure
500 * @addr: Phy ID (not used)
501 * @reg: Access location
502 * @data: Write data
503 */
504static void pch_gbe_mdio_write(struct net_device *netdev,
505 int addr, int reg, int data)
506{
507 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
508 struct pch_gbe_hw *hw = &adapter->hw;
509
510 pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
511}
512
513/**
514 * pch_gbe_reset_task - Reset processing at the time of transmission timeout
515 * @work: Pointer of board private structure
516 */
517static void pch_gbe_reset_task(struct work_struct *work)
518{
519 struct pch_gbe_adapter *adapter;
520 adapter = container_of(work, struct pch_gbe_adapter, reset_task);
521
522 pch_gbe_reinit_locked(adapter);
523}
524
525/**
526 * pch_gbe_reinit_locked- Re-initialization
527 * @adapter: Board private structure
528 */
529void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
530{
531 struct net_device *netdev = adapter->netdev;
532
533 rtnl_lock();
534 if (netif_running(netdev)) {
535 pch_gbe_down(adapter);
536 pch_gbe_up(adapter);
537 }
538 rtnl_unlock();
539}
540
541/**
542 * pch_gbe_reset - Reset GbE
543 * @adapter: Board private structure
544 */
545void pch_gbe_reset(struct pch_gbe_adapter *adapter)
546{
547 pch_gbe_mac_reset_hw(&adapter->hw);
548 /* Setup the receive address. */
549 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
550 if (pch_gbe_hal_init_hw(&adapter->hw))
551 pr_err("Hardware Error\n");
552}
553
554/**
555 * pch_gbe_free_irq - Free an interrupt
556 * @adapter: Board private structure
557 */
558static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
559{
560 struct net_device *netdev = adapter->netdev;
561
562 free_irq(adapter->pdev->irq, netdev);
563 if (adapter->have_msi) {
564 pci_disable_msi(adapter->pdev);
565 pr_debug("call pci_disable_msi\n");
566 }
567}
568
/**
 * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: Board private structure
 *
 * Increments irq_sem so that a later pch_gbe_irq_enable() only
 * re-enables interrupts once the count has returned to zero, clears the
 * interrupt-enable mask, and waits for any in-flight handler to finish.
 */
static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	atomic_inc(&adapter->irq_sem);
	iowrite32(0, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);	/* read of INT_ST — presumably flushes/acks status; confirm against HW spec */
	synchronize_irq(adapter->pdev->irq);

	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
584
/**
 * pch_gbe_irq_enable - Enable default interrupt generation settings
 * @adapter: Board private structure
 *
 * Counterpart of pch_gbe_irq_disable(): the enable mask is only written
 * back when irq_sem drops to zero, so nested disable/enable pairs
 * balance correctly.
 */
static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;

	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
	ioread32(&hw->reg->INT_ST);	/* read of INT_ST — presumably flushes the write; confirm against HW spec */
	pr_debug("INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN));
}
598
599
600
601/**
602 * pch_gbe_setup_tctl - configure the Transmit control registers
603 * @adapter: Board private structure
604 */
605static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
606{
607 struct pch_gbe_hw *hw = &adapter->hw;
608 u32 tx_mode, tcpip;
609
610 tx_mode = PCH_GBE_TM_LONG_PKT |
611 PCH_GBE_TM_ST_AND_FD |
612 PCH_GBE_TM_SHORT_PKT |
613 PCH_GBE_TM_TH_TX_STRT_8 |
614 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
615
616 iowrite32(tx_mode, &hw->reg->TX_MODE);
617
618 tcpip = ioread32(&hw->reg->TCPIP_ACC);
619 tcpip |= PCH_GBE_TX_TCPIPACC_EN;
620 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
621 return;
622}
623
624/**
625 * pch_gbe_configure_tx - Configure Transmit Unit after Reset
626 * @adapter: Board private structure
627 */
628static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
629{
630 struct pch_gbe_hw *hw = &adapter->hw;
631 u32 tdba, tdlen, dctrl;
632
633 pr_debug("dma addr = 0x%08llx size = 0x%08x\n",
634 (unsigned long long)adapter->tx_ring->dma,
635 adapter->tx_ring->size);
636
637 /* Setup the HW Tx Head and Tail descriptor pointers */
638 tdba = adapter->tx_ring->dma;
639 tdlen = adapter->tx_ring->size - 0x10;
640 iowrite32(tdba, &hw->reg->TX_DSC_BASE);
641 iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
642 iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
643
644 /* Enables Transmission DMA */
645 dctrl = ioread32(&hw->reg->DMA_CTRL);
646 dctrl |= PCH_GBE_TX_DMA_EN;
647 iowrite32(dctrl, &hw->reg->DMA_CTRL);
648}
649
650/**
651 * pch_gbe_setup_rctl - Configure the receive control registers
652 * @adapter: Board private structure
653 */
654static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
655{
656 struct pch_gbe_hw *hw = &adapter->hw;
657 u32 rx_mode, tcpip;
658
659 rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
660 PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
661
662 iowrite32(rx_mode, &hw->reg->RX_MODE);
663
664 tcpip = ioread32(&hw->reg->TCPIP_ACC);
665
666 if (adapter->rx_csum) {
667 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
668 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
669 } else {
670 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
671 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
672 }
673 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
674 return;
675}
676
677/**
678 * pch_gbe_configure_rx - Configure Receive Unit after Reset
679 * @adapter: Board private structure
680 */
681static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
682{
683 struct pch_gbe_hw *hw = &adapter->hw;
684 u32 rdba, rdlen, rctl, rxdma;
685
686 pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
687 (unsigned long long)adapter->rx_ring->dma,
688 adapter->rx_ring->size);
689
690 pch_gbe_mac_force_mac_fc(hw);
691
692 /* Disables Receive MAC */
693 rctl = ioread32(&hw->reg->MAC_RX_EN);
694 iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
695
696 /* Disables Receive DMA */
697 rxdma = ioread32(&hw->reg->DMA_CTRL);
698 rxdma &= ~PCH_GBE_RX_DMA_EN;
699 iowrite32(rxdma, &hw->reg->DMA_CTRL);
700
701 pr_debug("MAC_RX_EN reg = 0x%08x DMA_CTRL reg = 0x%08x\n",
702 ioread32(&hw->reg->MAC_RX_EN),
703 ioread32(&hw->reg->DMA_CTRL));
704
705 /* Setup the HW Rx Head and Tail Descriptor Pointers and
706 * the Base and Length of the Rx Descriptor Ring */
707 rdba = adapter->rx_ring->dma;
708 rdlen = adapter->rx_ring->size - 0x10;
709 iowrite32(rdba, &hw->reg->RX_DSC_BASE);
710 iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
711 iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
712
713 /* Enables Receive DMA */
714 rxdma = ioread32(&hw->reg->DMA_CTRL);
715 rxdma |= PCH_GBE_RX_DMA_EN;
716 iowrite32(rxdma, &hw->reg->DMA_CTRL);
717 /* Enables Receive */
718 iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
719}
720
721/**
722 * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
723 * @adapter: Board private structure
724 * @buffer_info: Buffer information structure
725 */
726static void pch_gbe_unmap_and_free_tx_resource(
727 struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
728{
729 if (buffer_info->mapped) {
730 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
731 buffer_info->length, DMA_TO_DEVICE);
732 buffer_info->mapped = false;
733 }
734 if (buffer_info->skb) {
735 dev_kfree_skb_any(buffer_info->skb);
736 buffer_info->skb = NULL;
737 }
738}
739
740/**
741 * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
742 * @adapter: Board private structure
743 * @buffer_info: Buffer information structure
744 */
745static void pch_gbe_unmap_and_free_rx_resource(
746 struct pch_gbe_adapter *adapter,
747 struct pch_gbe_buffer *buffer_info)
748{
749 if (buffer_info->mapped) {
750 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
751 buffer_info->length, DMA_FROM_DEVICE);
752 buffer_info->mapped = false;
753 }
754 if (buffer_info->skb) {
755 dev_kfree_skb_any(buffer_info->skb);
756 buffer_info->skb = NULL;
757 }
758}
759
760/**
761 * pch_gbe_clean_tx_ring - Free Tx Buffers
762 * @adapter: Board private structure
763 * @tx_ring: Ring to be cleaned
764 */
765static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
766 struct pch_gbe_tx_ring *tx_ring)
767{
768 struct pch_gbe_hw *hw = &adapter->hw;
769 struct pch_gbe_buffer *buffer_info;
770 unsigned long size;
771 unsigned int i;
772
773 /* Free all the Tx ring sk_buffs */
774 for (i = 0; i < tx_ring->count; i++) {
775 buffer_info = &tx_ring->buffer_info[i];
776 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
777 }
778 pr_debug("call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
779
780 size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
781 memset(tx_ring->buffer_info, 0, size);
782
783 /* Zero out the descriptor ring */
784 memset(tx_ring->desc, 0, tx_ring->size);
785 tx_ring->next_to_use = 0;
786 tx_ring->next_to_clean = 0;
787 iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
788 iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
789}
790
791/**
792 * pch_gbe_clean_rx_ring - Free Rx Buffers
793 * @adapter: Board private structure
794 * @rx_ring: Ring to free buffers from
795 */
796static void
797pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
798 struct pch_gbe_rx_ring *rx_ring)
799{
800 struct pch_gbe_hw *hw = &adapter->hw;
801 struct pch_gbe_buffer *buffer_info;
802 unsigned long size;
803 unsigned int i;
804
805 /* Free all the Rx ring sk_buffs */
806 for (i = 0; i < rx_ring->count; i++) {
807 buffer_info = &rx_ring->buffer_info[i];
808 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
809 }
810 pr_debug("call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
811 size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
812 memset(rx_ring->buffer_info, 0, size);
813
814 /* Zero out the descriptor ring */
815 memset(rx_ring->desc, 0, rx_ring->size);
816 rx_ring->next_to_clean = 0;
817 rx_ring->next_to_use = 0;
818 iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
819 iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
820}
821
/**
 * pch_gbe_set_rgmii_ctrl - Program the RGMII control register for a speed
 * @adapter: Board private structure
 * @speed:   Link speed (SPEED_10/SPEED_100/SPEED_1000)
 * @duplex:  Link duplex — currently unused by this function
 *
 * When the MAC interface is RGMII, selects the RGMII clock rate matching
 * @speed; otherwise (GMII build) clears RGMII_CTRL. A @speed value that
 * matches none of the cases writes 0 to the register.
 */
static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
				   u16 duplex)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	unsigned long rgmii = 0;

	/* Set the RGMII control. */
#ifdef PCH_GBE_MAC_IFOP_RGMII
	switch (speed) {
	case SPEED_10:
		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_100:
		rgmii = (PCH_GBE_RGMII_RATE_25M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	case SPEED_1000:
		rgmii = (PCH_GBE_RGMII_RATE_125M |
			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
		break;
	}
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#else	/* GMII */
	rgmii = 0;
	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
#endif
}
850static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
851 u16 duplex)
852{
853 struct net_device *netdev = adapter->netdev;
854 struct pch_gbe_hw *hw = &adapter->hw;
855 unsigned long mode = 0;
856
857 /* Set the communication mode */
858 switch (speed) {
859 case SPEED_10:
860 mode = PCH_GBE_MODE_MII_ETHER;
861 netdev->tx_queue_len = 10;
862 break;
863 case SPEED_100:
864 mode = PCH_GBE_MODE_MII_ETHER;
865 netdev->tx_queue_len = 100;
866 break;
867 case SPEED_1000:
868 mode = PCH_GBE_MODE_GMII_ETHER;
869 break;
870 }
871 if (duplex == DUPLEX_FULL)
872 mode |= PCH_GBE_MODE_FULL_DUPLEX;
873 else
874 mode |= PCH_GBE_MODE_HALF_DUPLEX;
875 iowrite32(mode, &hw->reg->MODE);
876}
877
878/**
879 * pch_gbe_watchdog - Watchdog process
880 * @data: Board private structure
881 */
882static void pch_gbe_watchdog(unsigned long data)
883{
884 struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
885 struct net_device *netdev = adapter->netdev;
886 struct pch_gbe_hw *hw = &adapter->hw;
887 struct ethtool_cmd cmd;
888
889 pr_debug("right now = %ld\n", jiffies);
890
891 pch_gbe_update_stats(adapter);
892 if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
893 netdev->tx_queue_len = adapter->tx_queue_len;
894 /* mii library handles link maintenance tasks */
895 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
896 pr_err("ethtool get setting Error\n");
897 mod_timer(&adapter->watchdog_timer,
898 round_jiffies(jiffies +
899 PCH_GBE_WATCHDOG_PERIOD));
900 return;
901 }
902 hw->mac.link_speed = cmd.speed;
903 hw->mac.link_duplex = cmd.duplex;
904 /* Set the RGMII control. */
905 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
906 hw->mac.link_duplex);
907 /* Set the communication mode */
908 pch_gbe_set_mode(adapter, hw->mac.link_speed,
909 hw->mac.link_duplex);
910 netdev_dbg(netdev,
911 "Link is Up %d Mbps %s-Duplex\n",
912 cmd.speed,
913 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
914 netif_carrier_on(netdev);
915 netif_wake_queue(netdev);
916 } else if ((!mii_link_ok(&adapter->mii)) &&
917 (netif_carrier_ok(netdev))) {
918 netdev_dbg(netdev, "NIC Link is Down\n");
919 hw->mac.link_speed = SPEED_10;
920 hw->mac.link_duplex = DUPLEX_HALF;
921 netif_carrier_off(netdev);
922 netif_stop_queue(netdev);
923 }
924 mod_timer(&adapter->watchdog_timer,
925 round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
926}
927
928/**
929 * pch_gbe_tx_queue - Carry out queuing of the transmission data
930 * @adapter: Board private structure
931 * @tx_ring: Tx descriptor ring structure
932 * @skb: Sockt buffer structure
933 */
934static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
935 struct pch_gbe_tx_ring *tx_ring,
936 struct sk_buff *skb)
937{
938 struct pch_gbe_hw *hw = &adapter->hw;
939 struct pch_gbe_tx_desc *tx_desc;
940 struct pch_gbe_buffer *buffer_info;
941 struct sk_buff *tmp_skb;
942 unsigned int frame_ctrl;
943 unsigned int ring_num;
944 unsigned long flags;
945
946 /*-- Set frame control --*/
947 frame_ctrl = 0;
948 if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
949 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
950 if (unlikely(!adapter->tx_csum))
951 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
952
953 /* Performs checksum processing */
954 /*
955 * It is because the hardware accelerator does not support a checksum,
956 * when the received data size is less than 64 bytes.
957 */
958 if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) {
959 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
960 PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
961 if (skb->protocol == htons(ETH_P_IP)) {
962 struct iphdr *iph = ip_hdr(skb);
963 unsigned int offset;
964 iph->check = 0;
965 iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
966 offset = skb_transport_offset(skb);
967 if (iph->protocol == IPPROTO_TCP) {
968 skb->csum = 0;
969 tcp_hdr(skb)->check = 0;
970 skb->csum = skb_checksum(skb, offset,
971 skb->len - offset, 0);
972 tcp_hdr(skb)->check =
973 csum_tcpudp_magic(iph->saddr,
974 iph->daddr,
975 skb->len - offset,
976 IPPROTO_TCP,
977 skb->csum);
978 } else if (iph->protocol == IPPROTO_UDP) {
979 skb->csum = 0;
980 udp_hdr(skb)->check = 0;
981 skb->csum =
982 skb_checksum(skb, offset,
983 skb->len - offset, 0);
984 udp_hdr(skb)->check =
985 csum_tcpudp_magic(iph->saddr,
986 iph->daddr,
987 skb->len - offset,
988 IPPROTO_UDP,
989 skb->csum);
990 }
991 }
992 }
993 spin_lock_irqsave(&tx_ring->tx_lock, flags);
994 ring_num = tx_ring->next_to_use;
995 if (unlikely((ring_num + 1) == tx_ring->count))
996 tx_ring->next_to_use = 0;
997 else
998 tx_ring->next_to_use = ring_num + 1;
999
1000 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
1001 buffer_info = &tx_ring->buffer_info[ring_num];
1002 tmp_skb = buffer_info->skb;
1003
1004 /* [Header:14][payload] ---> [Header:14][paddong:2][payload] */
1005 memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1006 tmp_skb->data[ETH_HLEN] = 0x00;
1007 tmp_skb->data[ETH_HLEN + 1] = 0x00;
1008 tmp_skb->len = skb->len;
1009 memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1010 (skb->len - ETH_HLEN));
1011 /*-- Set Buffer infomation --*/
1012 buffer_info->length = tmp_skb->len;
1013 buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1014 buffer_info->length,
1015 DMA_TO_DEVICE);
1016 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1017 pr_err("TX DMA map failed\n");
1018 buffer_info->dma = 0;
1019 buffer_info->time_stamp = 0;
1020 tx_ring->next_to_use = ring_num;
1021 return;
1022 }
1023 buffer_info->mapped = true;
1024 buffer_info->time_stamp = jiffies;
1025
1026 /*-- Set Tx descriptor --*/
1027 tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1028 tx_desc->buffer_addr = (buffer_info->dma);
1029 tx_desc->length = (tmp_skb->len);
1030 tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1031 tx_desc->tx_frame_ctrl = (frame_ctrl);
1032 tx_desc->gbec_status = (DSC_INIT16);
1033
1034 if (unlikely(++ring_num == tx_ring->count))
1035 ring_num = 0;
1036
1037 /* Update software pointer of TX descriptor */
1038 iowrite32(tx_ring->dma +
1039 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1040 &hw->reg->TX_DSC_SW_P);
1041 dev_kfree_skb_any(skb);
1042}
1043
1044/**
1045 * pch_gbe_update_stats - Update the board statistics counters
1046 * @adapter: Board private structure
1047 */
1048void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1049{
1050 struct net_device *netdev = adapter->netdev;
1051 struct pci_dev *pdev = adapter->pdev;
1052 struct pch_gbe_hw_stats *stats = &adapter->stats;
1053 unsigned long flags;
1054
1055 /*
1056 * Prevent stats update while adapter is being reset, or if the pci
1057 * connection is down.
1058 */
1059 if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1060 return;
1061
1062 spin_lock_irqsave(&adapter->stats_lock, flags);
1063
1064 /* Update device status "adapter->stats" */
1065 stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1066 stats->tx_errors = stats->tx_length_errors +
1067 stats->tx_aborted_errors +
1068 stats->tx_carrier_errors + stats->tx_timeout_count;
1069
1070 /* Update network device status "adapter->net_stats" */
1071 netdev->stats.rx_packets = stats->rx_packets;
1072 netdev->stats.rx_bytes = stats->rx_bytes;
1073 netdev->stats.rx_dropped = stats->rx_dropped;
1074 netdev->stats.tx_packets = stats->tx_packets;
1075 netdev->stats.tx_bytes = stats->tx_bytes;
1076 netdev->stats.tx_dropped = stats->tx_dropped;
1077 /* Fill out the OS statistics structure */
1078 netdev->stats.multicast = stats->multicast;
1079 netdev->stats.collisions = stats->collisions;
1080 /* Rx Errors */
1081 netdev->stats.rx_errors = stats->rx_errors;
1082 netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1083 netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1084 /* Tx Errors */
1085 netdev->stats.tx_errors = stats->tx_errors;
1086 netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1087 netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1088
1089 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1090}
1091
1092/**
1093 * pch_gbe_intr - Interrupt Handler
1094 * @irq: Interrupt number
1095 * @data: Pointer to a network interface device structure
1096 * Returns
1097 * - IRQ_HANDLED: Our interrupt
1098 * - IRQ_NONE: Not our interrupt
1099 */
1100static irqreturn_t pch_gbe_intr(int irq, void *data)
1101{
1102 struct net_device *netdev = data;
1103 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1104 struct pch_gbe_hw *hw = &adapter->hw;
1105 u32 int_st;
1106 u32 int_en;
1107
1108 /* Check request status */
1109 int_st = ioread32(&hw->reg->INT_ST);
1110 int_st = int_st & ioread32(&hw->reg->INT_EN);
1111 /* When request status is no interruption factor */
1112 if (unlikely(!int_st))
1113 return IRQ_NONE; /* Not our interrupt. End processing. */
1114 pr_debug("%s occur int_st = 0x%08x\n", __func__, int_st);
1115 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1116 adapter->stats.intr_rx_frame_err_count++;
1117 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1118 adapter->stats.intr_rx_fifo_err_count++;
1119 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1120 adapter->stats.intr_rx_dma_err_count++;
1121 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1122 adapter->stats.intr_tx_fifo_err_count++;
1123 if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1124 adapter->stats.intr_tx_dma_err_count++;
1125 if (int_st & PCH_GBE_INT_TCPIP_ERR)
1126 adapter->stats.intr_tcpip_err_count++;
1127 /* When Rx descriptor is empty */
1128 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1129 adapter->stats.intr_rx_dsc_empty_count++;
1130 pr_err("Rx descriptor is empty\n");
1131 int_en = ioread32(&hw->reg->INT_EN);
1132 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1133 if (hw->mac.tx_fc_enable) {
1134 /* Set Pause packet */
1135 pch_gbe_mac_set_pause_packet(hw);
1136 }
1137 if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))
1138 == 0) {
1139 return IRQ_HANDLED;
1140 }
1141 }
1142
1143 /* When request status is Receive interruption */
1144 if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) {
1145 if (likely(napi_schedule_prep(&adapter->napi))) {
1146 /* Enable only Rx Descriptor empty */
1147 atomic_inc(&adapter->irq_sem);
1148 int_en = ioread32(&hw->reg->INT_EN);
1149 int_en &=
1150 ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1151 iowrite32(int_en, &hw->reg->INT_EN);
1152 /* Start polling for NAPI */
1153 __napi_schedule(&adapter->napi);
1154 }
1155 }
1156 pr_debug("return = 0x%08x INT_EN reg = 0x%08x\n",
1157 IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1158 return IRQ_HANDLED;
1159}
1160
1161/**
1162 * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1163 * @adapter: Board private structure
1164 * @rx_ring: Rx descriptor ring
1165 * @cleaned_count: Cleaned count
1166 */
1167static void
1168pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1169 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1170{
1171 struct net_device *netdev = adapter->netdev;
1172 struct pci_dev *pdev = adapter->pdev;
1173 struct pch_gbe_hw *hw = &adapter->hw;
1174 struct pch_gbe_rx_desc *rx_desc;
1175 struct pch_gbe_buffer *buffer_info;
1176 struct sk_buff *skb;
1177 unsigned int i;
1178 unsigned int bufsz;
1179
1180 bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN;
1181 i = rx_ring->next_to_use;
1182
1183 while ((cleaned_count--)) {
1184 buffer_info = &rx_ring->buffer_info[i];
1185 skb = buffer_info->skb;
1186 if (skb) {
1187 skb_trim(skb, 0);
1188 } else {
1189 skb = netdev_alloc_skb(netdev, bufsz);
1190 if (unlikely(!skb)) {
1191 /* Better luck next round */
1192 adapter->stats.rx_alloc_buff_failed++;
1193 break;
1194 }
1195 /* 64byte align */
1196 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1197
1198 buffer_info->skb = skb;
1199 buffer_info->length = adapter->rx_buffer_len;
1200 }
1201 buffer_info->dma = dma_map_single(&pdev->dev,
1202 skb->data,
1203 buffer_info->length,
1204 DMA_FROM_DEVICE);
1205 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1206 dev_kfree_skb(skb);
1207 buffer_info->skb = NULL;
1208 buffer_info->dma = 0;
1209 adapter->stats.rx_alloc_buff_failed++;
1210 break; /* while !buffer_info->skb */
1211 }
1212 buffer_info->mapped = true;
1213 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1214 rx_desc->buffer_addr = (buffer_info->dma);
1215 rx_desc->gbec_status = DSC_INIT16;
1216
1217 pr_debug("i = %d buffer_info->dma = 0x08%llx buffer_info->length = 0x%x\n",
1218 i, (unsigned long long)buffer_info->dma,
1219 buffer_info->length);
1220
1221 if (unlikely(++i == rx_ring->count))
1222 i = 0;
1223 }
1224 if (likely(rx_ring->next_to_use != i)) {
1225 rx_ring->next_to_use = i;
1226 if (unlikely(i-- == 0))
1227 i = (rx_ring->count - 1);
1228 iowrite32(rx_ring->dma +
1229 (int)sizeof(struct pch_gbe_rx_desc) * i,
1230 &hw->reg->RX_DSC_SW_P);
1231 }
1232 return;
1233}
1234
1235/**
1236 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1237 * @adapter: Board private structure
1238 * @tx_ring: Tx descriptor ring
1239 */
1240static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1241 struct pch_gbe_tx_ring *tx_ring)
1242{
1243 struct pch_gbe_buffer *buffer_info;
1244 struct sk_buff *skb;
1245 unsigned int i;
1246 unsigned int bufsz;
1247 struct pch_gbe_tx_desc *tx_desc;
1248
1249 bufsz =
1250 adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1251
1252 for (i = 0; i < tx_ring->count; i++) {
1253 buffer_info = &tx_ring->buffer_info[i];
1254 skb = netdev_alloc_skb(adapter->netdev, bufsz);
1255 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1256 buffer_info->skb = skb;
1257 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1258 tx_desc->gbec_status = (DSC_INIT16);
1259 }
1260 return;
1261}
1262
1263/**
1264 * pch_gbe_clean_tx - Reclaim resources after transmit completes
1265 * @adapter: Board private structure
1266 * @tx_ring: Tx descriptor ring
1267 * Returns
1268 * true: Cleaned the descriptor
1269 * false: Not cleaned the descriptor
1270 */
1271static bool
1272pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1273 struct pch_gbe_tx_ring *tx_ring)
1274{
1275 struct pch_gbe_tx_desc *tx_desc;
1276 struct pch_gbe_buffer *buffer_info;
1277 struct sk_buff *skb;
1278 unsigned int i;
1279 unsigned int cleaned_count = 0;
1280 bool cleaned = false;
1281
1282 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1283
1284 i = tx_ring->next_to_clean;
1285 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1286 pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
1287 tx_desc->gbec_status, tx_desc->dma_status);
1288
1289 while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1290 pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
1291 cleaned = true;
1292 buffer_info = &tx_ring->buffer_info[i];
1293 skb = buffer_info->skb;
1294
1295 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1296 adapter->stats.tx_aborted_errors++;
1297 pr_err("Transfer Abort Error\n");
1298 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1299 ) {
1300 adapter->stats.tx_carrier_errors++;
1301 pr_err("Transfer Carrier Sense Error\n");
1302 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1303 ) {
1304 adapter->stats.tx_aborted_errors++;
1305 pr_err("Transfer Collision Abort Error\n");
1306 } else if ((tx_desc->gbec_status &
1307 (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1308 PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1309 adapter->stats.collisions++;
1310 adapter->stats.tx_packets++;
1311 adapter->stats.tx_bytes += skb->len;
1312 pr_debug("Transfer Collision\n");
1313 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1314 ) {
1315 adapter->stats.tx_packets++;
1316 adapter->stats.tx_bytes += skb->len;
1317 }
1318 if (buffer_info->mapped) {
1319 pr_debug("unmap buffer_info->dma : %d\n", i);
1320 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1321 buffer_info->length, DMA_TO_DEVICE);
1322 buffer_info->mapped = false;
1323 }
1324 if (buffer_info->skb) {
1325 pr_debug("trim buffer_info->skb : %d\n", i);
1326 skb_trim(buffer_info->skb, 0);
1327 }
1328 tx_desc->gbec_status = DSC_INIT16;
1329 if (unlikely(++i == tx_ring->count))
1330 i = 0;
1331 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1332
1333 /* weight of a sort for tx, to avoid endless transmit cleanup */
1334 if (cleaned_count++ == PCH_GBE_TX_WEIGHT)
1335 break;
1336 }
1337 pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1338 cleaned_count);
1339 /* Recover from running out of Tx resources in xmit_frame */
1340 if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
1341 netif_wake_queue(adapter->netdev);
1342 adapter->stats.tx_restart_count++;
1343 pr_debug("Tx wake queue\n");
1344 }
1345 spin_lock(&adapter->tx_queue_lock);
1346 tx_ring->next_to_clean = i;
1347 spin_unlock(&adapter->tx_queue_lock);
1348 pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
1349 return cleaned;
1350}
1351
1352/**
1353 * pch_gbe_clean_rx - Send received data up the network stack; legacy
1354 * @adapter: Board private structure
1355 * @rx_ring: Rx descriptor ring
1356 * @work_done: Completed count
1357 * @work_to_do: Request count
1358 * Returns
1359 * true: Cleaned the descriptor
1360 * false: Not cleaned the descriptor
1361 */
1362static bool
1363pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1364 struct pch_gbe_rx_ring *rx_ring,
1365 int *work_done, int work_to_do)
1366{
1367 struct net_device *netdev = adapter->netdev;
1368 struct pci_dev *pdev = adapter->pdev;
1369 struct pch_gbe_buffer *buffer_info;
1370 struct pch_gbe_rx_desc *rx_desc;
1371 u32 length;
1372 unsigned char tmp_packet[ETH_HLEN];
1373 unsigned int i;
1374 unsigned int cleaned_count = 0;
1375 bool cleaned = false;
1376 struct sk_buff *skb;
1377 u8 dma_status;
1378 u16 gbec_status;
1379 u32 tcp_ip_status;
1380 u8 skb_copy_flag = 0;
1381 u8 skb_padding_flag = 0;
1382
1383 i = rx_ring->next_to_clean;
1384
1385 while (*work_done < work_to_do) {
1386 /* Check Rx descriptor status */
1387 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1388 if (rx_desc->gbec_status == DSC_INIT16)
1389 break;
1390 cleaned = true;
1391 cleaned_count++;
1392
1393 dma_status = rx_desc->dma_status;
1394 gbec_status = rx_desc->gbec_status;
1395 tcp_ip_status = rx_desc->tcp_ip_status;
1396 rx_desc->gbec_status = DSC_INIT16;
1397 buffer_info = &rx_ring->buffer_info[i];
1398 skb = buffer_info->skb;
1399
1400 /* unmap dma */
1401 dma_unmap_single(&pdev->dev, buffer_info->dma,
1402 buffer_info->length, DMA_FROM_DEVICE);
1403 buffer_info->mapped = false;
1404 /* Prefetch the packet */
1405 prefetch(skb->data);
1406
1407 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1408 "TCP:0x%08x] BufInf = 0x%p\n",
1409 i, dma_status, gbec_status, tcp_ip_status,
1410 buffer_info);
1411 /* Error check */
1412 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1413 adapter->stats.rx_frame_errors++;
1414 pr_err("Receive Not Octal Error\n");
1415 } else if (unlikely(gbec_status &
1416 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1417 adapter->stats.rx_frame_errors++;
1418 pr_err("Receive Nibble Error\n");
1419 } else if (unlikely(gbec_status &
1420 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1421 adapter->stats.rx_crc_errors++;
1422 pr_err("Receive CRC Error\n");
1423 } else {
1424 /* get receive length */
1425 /* length convert[-3], padding[-2] */
1426 length = (rx_desc->rx_words_eob) - 3 - 2;
1427
1428 /* Decide the data conversion method */
1429 if (!adapter->rx_csum) {
1430 /* [Header:14][payload] */
1431 skb_padding_flag = 0;
1432 skb_copy_flag = 1;
1433 } else {
1434 /* [Header:14][padding:2][payload] */
1435 skb_padding_flag = 1;
1436 if (length < copybreak)
1437 skb_copy_flag = 1;
1438 else
1439 skb_copy_flag = 0;
1440 }
1441
1442 /* Data conversion */
1443 if (skb_copy_flag) { /* recycle skb */
1444 struct sk_buff *new_skb;
1445 new_skb =
1446 netdev_alloc_skb(netdev,
1447 length + NET_IP_ALIGN);
1448 if (new_skb) {
1449 if (!skb_padding_flag) {
1450 skb_reserve(new_skb,
1451 NET_IP_ALIGN);
1452 }
1453 memcpy(new_skb->data, skb->data,
1454 length);
1455 /* save the skb
1456 * in buffer_info as good */
1457 skb = new_skb;
1458 } else if (!skb_padding_flag) {
1459 /* dorrop error */
1460 pr_err("New skb allocation Error\n");
1461 goto dorrop;
1462 }
1463 } else {
1464 buffer_info->skb = NULL;
1465 }
1466 if (skb_padding_flag) {
1467 memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
1468 memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
1469 ETH_HLEN);
1470 skb_reserve(skb, NET_IP_ALIGN);
1471
1472 }
1473
1474 /* update status of driver */
1475 adapter->stats.rx_bytes += length;
1476 adapter->stats.rx_packets++;
1477 if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1478 adapter->stats.multicast++;
1479 /* Write meta date of skb */
1480 skb_put(skb, length);
1481 skb->protocol = eth_type_trans(skb, netdev);
1482 if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
1483 PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
1484 skb->ip_summed = CHECKSUM_UNNECESSARY;
1485 } else {
1486 skb->ip_summed = CHECKSUM_NONE;
1487 }
1488 napi_gro_receive(&adapter->napi, skb);
1489 (*work_done)++;
1490 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1491 skb->ip_summed, length);
1492 }
1493dorrop:
1494 /* return some buffers to hardware, one at a time is too slow */
1495 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1496 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1497 cleaned_count);
1498 cleaned_count = 0;
1499 }
1500 if (++i == rx_ring->count)
1501 i = 0;
1502 }
1503 rx_ring->next_to_clean = i;
1504 if (cleaned_count)
1505 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1506 return cleaned;
1507}
1508
1509/**
1510 * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1511 * @adapter: Board private structure
1512 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
1513 * Returns
1514 * 0: Successfully
1515 * Negative value: Failed
1516 */
1517int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1518 struct pch_gbe_tx_ring *tx_ring)
1519{
1520 struct pci_dev *pdev = adapter->pdev;
1521 struct pch_gbe_tx_desc *tx_desc;
1522 int size;
1523 int desNo;
1524
1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1526 tx_ring->buffer_info = vmalloc(size);
1527 if (!tx_ring->buffer_info) {
1528 pr_err("Unable to allocate memory for the buffer infomation\n");
1529 return -ENOMEM;
1530 }
1531 memset(tx_ring->buffer_info, 0, size);
1532
1533 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1534
1535 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1536 &tx_ring->dma, GFP_KERNEL);
1537 if (!tx_ring->desc) {
1538 vfree(tx_ring->buffer_info);
1539 pr_err("Unable to allocate memory for the transmit descriptor ring\n");
1540 return -ENOMEM;
1541 }
1542 memset(tx_ring->desc, 0, tx_ring->size);
1543
1544 tx_ring->next_to_use = 0;
1545 tx_ring->next_to_clean = 0;
1546 spin_lock_init(&tx_ring->tx_lock);
1547
1548 for (desNo = 0; desNo < tx_ring->count; desNo++) {
1549 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1550 tx_desc->gbec_status = DSC_INIT16;
1551 }
1552 pr_debug("tx_ring->desc = 0x%p tx_ring->dma = 0x%08llx\n"
1553 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1554 tx_ring->desc, (unsigned long long)tx_ring->dma,
1555 tx_ring->next_to_clean, tx_ring->next_to_use);
1556 return 0;
1557}
1558
1559/**
1560 * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1561 * @adapter: Board private structure
1562 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1563 * Returns
1564 * 0: Successfully
1565 * Negative value: Failed
1566 */
1567int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1568 struct pch_gbe_rx_ring *rx_ring)
1569{
1570 struct pci_dev *pdev = adapter->pdev;
1571 struct pch_gbe_rx_desc *rx_desc;
1572 int size;
1573 int desNo;
1574
1575 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1576 rx_ring->buffer_info = vmalloc(size);
1577 if (!rx_ring->buffer_info) {
1578 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1579 return -ENOMEM;
1580 }
1581 memset(rx_ring->buffer_info, 0, size);
1582 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1583 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1584 &rx_ring->dma, GFP_KERNEL);
1585
1586 if (!rx_ring->desc) {
1587 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1588 vfree(rx_ring->buffer_info);
1589 return -ENOMEM;
1590 }
1591 memset(rx_ring->desc, 0, rx_ring->size);
1592 rx_ring->next_to_clean = 0;
1593 rx_ring->next_to_use = 0;
1594 for (desNo = 0; desNo < rx_ring->count; desNo++) {
1595 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1596 rx_desc->gbec_status = DSC_INIT16;
1597 }
1598 pr_debug("rx_ring->desc = 0x%p rx_ring->dma = 0x%08llx "
1599 "next_to_clean = 0x%08x next_to_use = 0x%08x\n",
1600 rx_ring->desc, (unsigned long long)rx_ring->dma,
1601 rx_ring->next_to_clean, rx_ring->next_to_use);
1602 return 0;
1603}
1604
1605/**
1606 * pch_gbe_free_tx_resources - Free Tx Resources
1607 * @adapter: Board private structure
1608 * @tx_ring: Tx descriptor ring for a specific queue
1609 */
1610void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1611 struct pch_gbe_tx_ring *tx_ring)
1612{
1613 struct pci_dev *pdev = adapter->pdev;
1614
1615 pch_gbe_clean_tx_ring(adapter, tx_ring);
1616 vfree(tx_ring->buffer_info);
1617 tx_ring->buffer_info = NULL;
1618 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1619 tx_ring->desc = NULL;
1620}
1621
1622/**
1623 * pch_gbe_free_rx_resources - Free Rx Resources
1624 * @adapter: Board private structure
1625 * @rx_ring: Ring to clean the resources from
1626 */
1627void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1628 struct pch_gbe_rx_ring *rx_ring)
1629{
1630 struct pci_dev *pdev = adapter->pdev;
1631
1632 pch_gbe_clean_rx_ring(adapter, rx_ring);
1633 vfree(rx_ring->buffer_info);
1634 rx_ring->buffer_info = NULL;
1635 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1636 rx_ring->desc = NULL;
1637}
1638
1639/**
1640 * pch_gbe_request_irq - Allocate an interrupt line
1641 * @adapter: Board private structure
1642 * Returns
1643 * 0: Successfully
1644 * Negative value: Failed
1645 */
1646static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1647{
1648 struct net_device *netdev = adapter->netdev;
1649 int err;
1650 int flags;
1651
1652 flags = IRQF_SHARED;
1653 adapter->have_msi = false;
1654 err = pci_enable_msi(adapter->pdev);
1655 pr_debug("call pci_enable_msi\n");
1656 if (err) {
1657 pr_debug("call pci_enable_msi - Error: %d\n", err);
1658 } else {
1659 flags = 0;
1660 adapter->have_msi = true;
1661 }
1662 err = request_irq(adapter->pdev->irq, &pch_gbe_intr,
1663 flags, netdev->name, netdev);
1664 if (err)
1665 pr_err("Unable to allocate interrupt Error: %d\n", err);
1666 pr_debug("adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n",
1667 adapter->have_msi, flags, err);
1668 return err;
1669}
1670
1671
static void pch_gbe_set_multi(struct net_device *netdev);
/**
 * pch_gbe_up - Up GbE network device
 * @adapter: Board private structure
 * Returns
 * 0: Successfully
 * Negative value: Failed
 */
int pch_gbe_up(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
	int err;

	/* hardware has been reset, we need to reload some things */
	pch_gbe_set_multi(netdev);

	/* program Tx/Rx control registers and descriptor ring addresses */
	pch_gbe_setup_tctl(adapter);
	pch_gbe_configure_tx(adapter);
	pch_gbe_setup_rctl(adapter);
	pch_gbe_configure_rx(adapter);

	err = pch_gbe_request_irq(adapter);
	if (err) {
		pr_err("Error: can't bring device up\n");
		return err;
	}
	/* populate the rings only after the IRQ is in place */
	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
	/* remember the stack's queue length; pch_gbe_down() restores it */
	adapter->tx_queue_len = netdev->tx_queue_len;

	mod_timer(&adapter->watchdog_timer, jiffies);

	napi_enable(&adapter->napi);
	pch_gbe_irq_enable(adapter);
	netif_start_queue(adapter->netdev);

	return 0;
}
1712
/**
 * pch_gbe_down - Down GbE network device
 * @adapter: Board private structure
 */
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	napi_disable(&adapter->napi);
	atomic_set(&adapter->irq_sem, 0);

	pch_gbe_irq_disable(adapter);
	pch_gbe_free_irq(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	/* restore the queue length saved in pch_gbe_up() */
	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	pch_gbe_reset(adapter);
	/* release any buffers still held by the rings */
	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
}
1739
/**
 * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
 * @adapter: Board private structure to initialize
 * Returns
 * 0: Successfully
 * Negative value: Failed
 */
static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* default Rx buffer size and frame-size limits derived from MTU */
	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* Initialize the hardware-specific values */
	if (pch_gbe_hal_setup_init_funcs(hw)) {
		pr_err("Hardware Initialization Failure\n");
		return -EIO;
	}
	if (pch_gbe_alloc_queues(adapter)) {
		pr_err("Unable to allocate memory for queues\n");
		return -ENOMEM;
	}
	spin_lock_init(&adapter->hw.miim_lock);
	spin_lock_init(&adapter->tx_queue_lock);
	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->ethtool_lock);
	atomic_set(&adapter->irq_sem, 0);
	/* interrupts stay masked until pch_gbe_up() enables them */
	pch_gbe_irq_disable(adapter);

	pch_gbe_init_stats(adapter);

	pr_debug("rx_buffer_len : %d mac.min_frame_size : %d mac.max_frame_size : %d\n",
		 (u32) adapter->rx_buffer_len,
		 hw->mac.min_frame_size, hw->mac.max_frame_size);
	return 0;
}
1779
/**
 * pch_gbe_open - Called when a network interface is made active
 * @netdev: Network interface device structure
 * Returns
 * 0: Successfully
 * Negative value: Failed
 */
static int pch_gbe_open(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	int err;

	/* allocate transmit descriptors */
	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
	if (err)
		goto err_setup_tx;
	/* allocate receive descriptors */
	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
	if (err)
		goto err_setup_rx;
	pch_gbe_hal_power_up_phy(hw);
	err = pch_gbe_up(adapter);
	if (err)
		goto err_up;
	pr_debug("Success End\n");
	return 0;

	/* unwind in reverse order of the steps above */
err_up:
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
err_setup_rx:
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
err_setup_tx:
	pch_gbe_reset(adapter);
	pr_err("Error End\n");
	return err;
}
1819
/**
 * pch_gbe_stop - Disables a network interface
 * @netdev: Network interface device structure
 * Returns
 * 0: Successfully
 */
static int pch_gbe_stop(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	pch_gbe_down(adapter);
	/* keep the PHY powered if Wake-on-LAN events are armed */
	if (!adapter->wake_up_evt)
		pch_gbe_hal_power_down_phy(hw);
	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
	return 0;
}
1838
/**
 * pch_gbe_xmit_frame - Packet transmitting start
 * @skb: Socket buffer structure
 * @netdev: Network interface device structure
 * Returns
 * - NETDEV_TX_OK: Normal end
 * - NETDEV_TX_BUSY: Error end
 */
static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
	unsigned long flags;

	/* Oversized frames are dropped and counted, never queued.
	 * NOTE(review): the "- 4" presumably reserves the 4-byte FCS
	 * included in max_frame_size — confirm against the MAC spec. */
	if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
		pr_err("Transfer length Error: skb len: %d > max: %d\n",
		       skb->len, adapter->hw.mac.max_frame_size);
		dev_kfree_skb_any(skb);
		adapter->stats.tx_length_errors++;
		return NETDEV_TX_OK;
	}
	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
		/* Collision - tell upper layer to requeue */
		return NETDEV_TX_LOCKED;
	}
	/* no free descriptors: stop the queue and ask the stack to retry */
	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
		pr_debug("Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
			 tx_ring->next_to_use, tx_ring->next_to_clean);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

	/* CRC,ITAG no support */
	pch_gbe_tx_queue(adapter, tx_ring, skb);
	return NETDEV_TX_OK;
}
1877
1878/**
1879 * pch_gbe_get_stats - Get System Network Statistics
1880 * @netdev: Network interface device structure
1881 * Returns: The current stats
1882 */
1883static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
1884{
1885 /* only return the current stats */
1886 return &netdev->stats;
1887}
1888
/**
 * pch_gbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: Network interface device structure
 */
static void pch_gbe_set_multi(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	u32 rctl;
	int i;
	int mc_count;

	pr_debug("netdev->flags : 0x%08x\n", netdev->flags);

	/* Check for Promiscuous and All Multicast modes */
	rctl = ioread32(&hw->reg->RX_MODE);
	mc_count = netdev_mc_count(netdev);
	if ((netdev->flags & IFF_PROMISC)) {
		/* promiscuous: disable both address and multicast filters */
		rctl &= ~PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else if ((netdev->flags & IFF_ALLMULTI)) {
		/* all the multicasting receive permissions */
		rctl |= PCH_GBE_ADD_FIL_EN;
		rctl &= ~PCH_GBE_MLT_FIL_EN;
	} else {
		if (mc_count >= PCH_GBE_MAR_ENTRIES) {
			/* more groups than MAR slots: fall back to
			 * accepting all multicast */
			/* all the multicasting receive permissions */
			rctl |= PCH_GBE_ADD_FIL_EN;
			rctl &= ~PCH_GBE_MLT_FIL_EN;
		} else {
			rctl |= (PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
		}
	}
	iowrite32(rctl, &hw->reg->RX_MODE);

	/* nothing to program when the multicast filter is bypassed */
	if (mc_count >= PCH_GBE_MAR_ENTRIES)
		return;
	mta_list = kmalloc(mc_count * ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mta_list + (i++ * ETH_ALEN), &ha->addr, ETH_ALEN);
	}
	pch_gbe_mac_mc_addr_list_update(hw, mta_list, i, 1,
					PCH_GBE_MAR_ENTRIES);
	kfree(mta_list);

	pr_debug("RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x netdev->mc_count : 0x%08x\n",
		 ioread32(&hw->reg->RX_MODE), mc_count);
}
1946
1947/**
1948 * pch_gbe_set_mac - Change the Ethernet Address of the NIC
1949 * @netdev: Network interface device structure
1950 * @addr: Pointer to an address structure
1951 * Returns
1952 * 0: Successfully
1953 * -EADDRNOTAVAIL: Failed
1954 */
1955static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
1956{
1957 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1958 struct sockaddr *skaddr = addr;
1959 int ret_val;
1960
1961 if (!is_valid_ether_addr(skaddr->sa_data)) {
1962 ret_val = -EADDRNOTAVAIL;
1963 } else {
1964 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
1965 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
1966 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1967 ret_val = 0;
1968 }
1969 pr_debug("ret_val : 0x%08x\n", ret_val);
1970 pr_debug("dev_addr : %pM\n", netdev->dev_addr);
1971 pr_debug("mac_addr : %pM\n", adapter->hw.mac.addr);
1972 pr_debug("MAC_ADR1AB reg : 0x%08x 0x%08x\n",
1973 ioread32(&adapter->hw.reg->mac_adr[0].high),
1974 ioread32(&adapter->hw.reg->mac_adr[0].low));
1975 return ret_val;
1976}
1977
/**
 * pch_gbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: Network interface device structure
 * @new_mtu: New value for maximum frame size
 * Returns
 * 0: Successfully
 * -EINVAL: Failed
 */
static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	int max_frame;

	max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > PCH_GBE_MAX_JUMBO_FRAME_SIZE)) {
		pr_err("Invalid MTU setting\n");
		return -EINVAL;
	}
	/* pick the smallest supported Rx buffer that fits the new frame */
	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
	else
		adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE;
	netdev->mtu = new_mtu;
	adapter->hw.mac.max_frame_size = max_frame;

	/* a running interface is torn down and brought back up so the
	 * rings are rebuilt with the new buffer length */
	if (netif_running(netdev))
		pch_gbe_reinit_locked(adapter);
	else
		pch_gbe_reset(adapter);

	pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
		 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
		 adapter->hw.mac.max_frame_size);
	return 0;
}
2018
2019/**
2020 * pch_gbe_ioctl - Controls register through a MII interface
2021 * @netdev: Network interface device structure
2022 * @ifr: Pointer to ifr structure
2023 * @cmd: Control command
2024 * Returns
2025 * 0: Successfully
2026 * Negative value: Failed
2027 */
2028static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2029{
2030 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2031
2032 pr_debug("cmd : 0x%04x\n", cmd);
2033
2034 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2035}
2036
/**
 * pch_gbe_tx_timeout - Respond to a Tx Hang
 * @netdev: Network interface device structure
 */
static void pch_gbe_tx_timeout(struct net_device *netdev)
{
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	/* count the hang, then defer the actual reset to process context */
	adapter->stats.tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}
2049
/**
 * pch_gbe_napi_poll - NAPI receive and transfer polling callback
 * @napi: Pointer of polling device struct
 * @budget: The maximum number of a packet
 * Returns
 * false: Exit the polling mode
 * true: Continue the polling mode
 */
static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
{
	struct pch_gbe_adapter *adapter =
	    container_of(napi, struct pch_gbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	bool poll_end_flag = false;
	bool cleaned = false;

	pr_debug("budget : %d\n", budget);

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev)) {
		/* no link: nothing to clean, leave polling mode */
		poll_end_flag = true;
	} else {
		cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
		pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);

		/* Tx completions were reclaimed: report the full budget so
		 * NAPI keeps polling */
		if (cleaned)
			work_done = budget;
		/* If no Tx and not enough Rx work done,
		 * exit the polling mode
		 */
		if ((work_done < budget) || !netif_running(netdev))
			poll_end_flag = true;
	}

	/* leaving polling mode: re-enable the device interrupt */
	if (poll_end_flag) {
		napi_complete(napi);
		pch_gbe_irq_enable(adapter);
	}

	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
		 poll_end_flag, work_done, budget);

	return work_done;
}
2095
2096#ifdef CONFIG_NET_POLL_CONTROLLER
2097/**
2098 * pch_gbe_netpoll - Used by things like netconsole to send skbs
2099 * @netdev: Network interface device structure
2100 */
2101static void pch_gbe_netpoll(struct net_device *netdev)
2102{
2103 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2104
2105 disable_irq(adapter->pdev->irq);
2106 pch_gbe_intr(adapter->pdev->irq, netdev);
2107 enable_irq(adapter->pdev->irq);
2108}
2109#endif
2110
2111static const struct net_device_ops pch_gbe_netdev_ops = {
2112 .ndo_open = pch_gbe_open,
2113 .ndo_stop = pch_gbe_stop,
2114 .ndo_start_xmit = pch_gbe_xmit_frame,
2115 .ndo_get_stats = pch_gbe_get_stats,
2116 .ndo_set_mac_address = pch_gbe_set_mac,
2117 .ndo_tx_timeout = pch_gbe_tx_timeout,
2118 .ndo_change_mtu = pch_gbe_change_mtu,
2119 .ndo_do_ioctl = pch_gbe_ioctl,
2120 .ndo_set_multicast_list = &pch_gbe_set_multi,
2121#ifdef CONFIG_NET_POLL_CONTROLLER
2122 .ndo_poll_controller = pch_gbe_netpoll,
2123#endif
2124};
2125
/* PCI error-recovery callback: the PCI channel is failing, so detach and
 * quiesce the interface, then ask the core for a slot reset. */
static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);
	if (netif_running(netdev))
		pch_gbe_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2139
/* PCI error-recovery callback: re-enable and re-initialize the device
 * after a slot reset. */
static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
	struct pch_gbe_hw *hw = &adapter->hw;

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	pch_gbe_hal_power_up_phy(hw);
	pch_gbe_reset(adapter);
	/* Clear wake up status */
	pch_gbe_mac_set_wol_event(hw, 0);

	return PCI_ERS_RESULT_RECOVERED;
}
2159
/* PCI error-recovery callback: traffic may flow again; bring the
 * interface back up (if it was running) and re-attach it. */
static void pch_gbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* Re-arm the data path first; only attach if that succeeded. */
	if (netif_running(netdev) && pch_gbe_up(adapter)) {
		pr_debug("can't bring device back up after reset\n");
		return;
	}
	netif_device_attach(netdev);
}
2173
2174static int __pch_gbe_suspend(struct pci_dev *pdev)
2175{
2176 struct net_device *netdev = pci_get_drvdata(pdev);
2177 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2178 struct pch_gbe_hw *hw = &adapter->hw;
2179 u32 wufc = adapter->wake_up_evt;
2180 int retval = 0;
2181
2182 netif_device_detach(netdev);
2183 if (netif_running(netdev))
2184 pch_gbe_down(adapter);
2185 if (wufc) {
2186 pch_gbe_set_multi(netdev);
2187 pch_gbe_setup_rctl(adapter);
2188 pch_gbe_configure_rx(adapter);
2189 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2190 hw->mac.link_duplex);
2191 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2192 hw->mac.link_duplex);
2193 pch_gbe_mac_set_wol_event(hw, wufc);
2194 pci_disable_device(pdev);
2195 } else {
2196 pch_gbe_hal_power_down_phy(hw);
2197 pch_gbe_mac_set_wol_event(hw, wufc);
2198 pci_disable_device(pdev);
2199 }
2200 return retval;
2201}
2202
2203#ifdef CONFIG_PM
/* dev_pm_ops .suspend/.freeze/.poweroff hook: delegate to the shared
 * PCI-level suspend path. */
static int pch_gbe_suspend(struct device *device)
{
	return __pch_gbe_suspend(to_pci_dev(device));
}
2210
2211static int pch_gbe_resume(struct device *device)
2212{
2213 struct pci_dev *pdev = to_pci_dev(device);
2214 struct net_device *netdev = pci_get_drvdata(pdev);
2215 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2216 struct pch_gbe_hw *hw = &adapter->hw;
2217 u32 err;
2218
2219 err = pci_enable_device(pdev);
2220 if (err) {
2221 pr_err("Cannot enable PCI device from suspend\n");
2222 return err;
2223 }
2224 pci_set_master(pdev);
2225 pch_gbe_hal_power_up_phy(hw);
2226 pch_gbe_reset(adapter);
2227 /* Clear wake on lan control and status */
2228 pch_gbe_mac_set_wol_event(hw, 0);
2229
2230 if (netif_running(netdev))
2231 pch_gbe_up(adapter);
2232 netif_device_attach(netdev);
2233
2234 return 0;
2235}
2236#endif /* CONFIG_PM */
2237
/* System shutdown hook: put the NIC to sleep and, when powering off,
 * arm PCI wake and drop to D3hot so Wake-on-LAN can still fire. */
static void pch_gbe_shutdown(struct pci_dev *pdev)
{
	__pch_gbe_suspend(pdev);
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
2246
/* Device removal: undo everything pch_gbe_probe() set up, in reverse. */
static void pch_gbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pch_gbe_adapter *adapter = netdev_priv(netdev);

	/* make sure the reset_task scheduled by pch_gbe_tx_timeout() has
	 * finished before the device goes away */
	flush_scheduled_work();
	unregister_netdev(netdev);

	pch_gbe_hal_phy_hw_reset(&adapter->hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.reg);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}
2265
2266static int pch_gbe_probe(struct pci_dev *pdev,
2267 const struct pci_device_id *pci_id)
2268{
2269 struct net_device *netdev;
2270 struct pch_gbe_adapter *adapter;
2271 int ret;
2272
2273 ret = pci_enable_device(pdev);
2274 if (ret)
2275 return ret;
2276
2277 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2278 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2279 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2280 if (ret) {
2281 ret = pci_set_consistent_dma_mask(pdev,
2282 DMA_BIT_MASK(32));
2283 if (ret) {
2284 dev_err(&pdev->dev, "ERR: No usable DMA "
2285 "configuration, aborting\n");
2286 goto err_disable_device;
2287 }
2288 }
2289 }
2290
2291 ret = pci_request_regions(pdev, KBUILD_MODNAME);
2292 if (ret) {
2293 dev_err(&pdev->dev,
2294 "ERR: Can't reserve PCI I/O and memory resources\n");
2295 goto err_disable_device;
2296 }
2297 pci_set_master(pdev);
2298
2299 netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2300 if (!netdev) {
2301 ret = -ENOMEM;
2302 dev_err(&pdev->dev,
2303 "ERR: Can't allocate and set up an Ethernet device\n");
2304 goto err_release_pci;
2305 }
2306 SET_NETDEV_DEV(netdev, &pdev->dev);
2307
2308 pci_set_drvdata(pdev, netdev);
2309 adapter = netdev_priv(netdev);
2310 adapter->netdev = netdev;
2311 adapter->pdev = pdev;
2312 adapter->hw.back = adapter;
2313 adapter->hw.reg = pci_iomap(pdev, PCH_GBE_PCI_BAR, 0);
2314 if (!adapter->hw.reg) {
2315 ret = -EIO;
2316 dev_err(&pdev->dev, "Can't ioremap\n");
2317 goto err_free_netdev;
2318 }
2319
2320 netdev->netdev_ops = &pch_gbe_netdev_ops;
2321 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2322 netif_napi_add(netdev, &adapter->napi,
2323 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2324 netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
2325 pch_gbe_set_ethtool_ops(netdev);
2326
2327 pch_gbe_mac_reset_hw(&adapter->hw);
2328
2329 /* setup the private structure */
2330 ret = pch_gbe_sw_init(adapter);
2331 if (ret)
2332 goto err_iounmap;
2333
2334 /* Initialize PHY */
2335 ret = pch_gbe_init_phy(adapter);
2336 if (ret) {
2337 dev_err(&pdev->dev, "PHY initialize error\n");
2338 goto err_free_adapter;
2339 }
2340 pch_gbe_hal_get_bus_info(&adapter->hw);
2341
2342 /* Read the MAC address. and store to the private data */
2343 ret = pch_gbe_hal_read_mac_addr(&adapter->hw);
2344 if (ret) {
2345 dev_err(&pdev->dev, "MAC address Read Error\n");
2346 goto err_free_adapter;
2347 }
2348
2349 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2350 if (!is_valid_ether_addr(netdev->dev_addr)) {
2351 dev_err(&pdev->dev, "Invalid MAC Address\n");
2352 ret = -EIO;
2353 goto err_free_adapter;
2354 }
2355 setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
2356 (unsigned long)adapter);
2357
2358 INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2359
2360 pch_gbe_check_options(adapter);
2361
2362 if (adapter->tx_csum)
2363 netdev->features |= NETIF_F_HW_CSUM;
2364 else
2365 netdev->features &= ~NETIF_F_HW_CSUM;
2366
2367 /* initialize the wol settings based on the eeprom settings */
2368 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2369 dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2370
2371 /* reset the hardware with the new settings */
2372 pch_gbe_reset(adapter);
2373
2374 ret = register_netdev(netdev);
2375 if (ret)
2376 goto err_free_adapter;
2377 /* tell the stack to leave us alone until pch_gbe_open() is called */
2378 netif_carrier_off(netdev);
2379 netif_stop_queue(netdev);
2380
2381 dev_dbg(&pdev->dev, "OKIsemi(R) PCH Network Connection\n");
2382
2383 device_set_wakeup_enable(&pdev->dev, 1);
2384 return 0;
2385
2386err_free_adapter:
2387 pch_gbe_hal_phy_hw_reset(&adapter->hw);
2388 kfree(adapter->tx_ring);
2389 kfree(adapter->rx_ring);
2390err_iounmap:
2391 iounmap(adapter->hw.reg);
2392err_free_netdev:
2393 free_netdev(netdev);
2394err_release_pci:
2395 pci_release_regions(pdev);
2396err_disable_device:
2397 pci_disable_device(pdev);
2398 return ret;
2399}
2400
/* PCI IDs handled by this driver; the class/class_mask pair restricts
 * binding to ethernet-class functions of the listed device */
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
	{.vendor = PCI_VENDOR_ID_INTEL,
	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
	 .class_mask = (0xFFFF00)
	 },
	/* required last entry */
	{0}
};
2412
2413#ifdef CONFIG_PM
/* System sleep callbacks: the same suspend/resume pair also serves
 * freeze/thaw and poweroff/restore (hibernation) */
static const struct dev_pm_ops pch_gbe_pm_ops = {
	.suspend = pch_gbe_suspend,
	.resume = pch_gbe_resume,
	.freeze = pch_gbe_suspend,
	.thaw = pch_gbe_resume,
	.poweroff = pch_gbe_suspend,
	.restore = pch_gbe_resume,
};
2422#endif
2423
/* PCI error-recovery (AER) callbacks registered with the PCI core */
static struct pci_error_handlers pch_gbe_err_handler = {
	.error_detected = pch_gbe_io_error_detected,
	.slot_reset = pch_gbe_io_slot_reset,
	.resume = pch_gbe_io_resume
};
2429
static struct pci_driver pch_gbe_pcidev = {
	.name = KBUILD_MODNAME,
	.id_table = pch_gbe_pcidev_id,
	.probe = pch_gbe_probe,
	.remove = pch_gbe_remove,
	/* NOTE(review): pch_gbe_pm_ops is defined under CONFIG_PM but
	 * referenced here under CONFIG_PM_OPS — confirm the two symbols
	 * cannot diverge in the targeted kernel configs */
#ifdef CONFIG_PM_OPS
	.driver.pm = &pch_gbe_pm_ops,
#endif
	.shutdown = pch_gbe_shutdown,
	.err_handler = &pch_gbe_err_handler
};
2441
2442
2443static int __init pch_gbe_init_module(void)
2444{
2445 int ret;
2446
2447 ret = pci_register_driver(&pch_gbe_pcidev);
2448 if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
2449 if (copybreak == 0) {
2450 pr_info("copybreak disabled\n");
2451 } else {
2452 pr_info("copybreak enabled for packets <= %u bytes\n",
2453 copybreak);
2454 }
2455 }
2456 return ret;
2457}
2458
/* Module exit point: unregister the PCI driver on unload */
static void __exit pch_gbe_exit_module(void)
{
	pci_unregister_driver(&pch_gbe_pcidev);
}
2463
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);

/* Module identity and the PCI table exported for module autoloading */
MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);

module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* pch_gbe_main.c */
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
new file mode 100644
index 000000000000..2510146fc560
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -0,0 +1,499 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22
#define OPTION_UNSET   -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED  1

/**
 * TxDescriptors - Transmit Descriptor Count
 * @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
 * @Default Value: PCH_GBE_DEFAULT_TXD
 */
static int TxDescriptors = OPTION_UNSET;
module_param(TxDescriptors, int, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

/**
 * RxDescriptors - Receive Descriptor Count
 * @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
 * @Default Value: PCH_GBE_DEFAULT_RXD
 */
static int RxDescriptors = OPTION_UNSET;
module_param(RxDescriptors, int, 0);
MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");

/**
 * Speed - User Specified Speed Override
 * @Valid Range: 0, 10, 100, 1000
 *   - 0:    auto-negotiate at all supported speeds
 *   - 10:   only link at 10 Mbps
 *   - 100:  only link at 100 Mbps
 *   - 1000: only link at 1000 Mbps
 * @Default Value: 0
 */
static int Speed = OPTION_UNSET;
module_param(Speed, int, 0);
MODULE_PARM_DESC(Speed, "Speed setting");

/**
 * Duplex - User Specified Duplex Override
 * @Valid Range: 0-2
 *   - 0: auto-negotiate for duplex
 *   - 1: only link at half duplex
 *   - 2: only link at full duplex
 * @Default Value: 0
 */
static int Duplex = OPTION_UNSET;
module_param(Duplex, int, 0);
MODULE_PARM_DESC(Duplex, "Duplex setting");

#define HALF_DUPLEX 1
#define FULL_DUPLEX 2

/**
 * AutoNeg - Auto-negotiation Advertisement Override
 * @Valid Range: 0x01-0x0F, 0x20-0x2F
 *
 * The AutoNeg value is a bit mask describing which speed and duplex
 * combinations should be advertised during auto-negotiation.
 * The supported speed and duplex modes are listed below
 *
 * Bit           7     6     5     4     3     2     1     0
 * Speed (Mbps)  N/A   N/A   1000  N/A   100   100   10    10
 * Duplex                    Full        Full  Half  Full  Half
 *
 * @Default Value: 0x2F (copper)
 */
static int AutoNeg = OPTION_UNSET;
module_param(AutoNeg, int, 0);
MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");

#define PHY_ADVERTISE_10_HALF		0x0001
#define PHY_ADVERTISE_10_FULL		0x0002
#define PHY_ADVERTISE_100_HALF		0x0004
#define PHY_ADVERTISE_100_FULL		0x0008
#define PHY_ADVERTISE_1000_HALF		0x0010 /* Not used, just FYI */
#define PHY_ADVERTISE_1000_FULL		0x0020
#define PCH_AUTONEG_ADVERTISE_DEFAULT	0x2F

/**
 * FlowControl - User Specified Flow Control Override
 * @Valid Range: 0-3
 *   - 0: No Flow Control
 *   - 1: Rx only, respond to PAUSE frames but do not generate them
 *   - 2: Tx only, generate PAUSE frames but ignore them on receive
 *   - 3: Full Flow Control Support
 * @Default Value: Read flow control settings from the EEPROM
 */
static int FlowControl = OPTION_UNSET;
module_param(FlowControl, int, 0);
MODULE_PARM_DESC(FlowControl, "Flow Control setting");

/*
 * XsumRX - Receive Checksum Offload Enable/Disable
 * @Valid Range: 0, 1
 *   - 0: disables all checksum offload
 *   - 1: enables receive IP/TCP/UDP checksum offload
 * @Default Value: PCH_GBE_DEFAULT_RX_CSUM
 */
static int XsumRX = OPTION_UNSET;
module_param(XsumRX, int, 0);
MODULE_PARM_DESC(XsumRX, "Disable or enable Receive Checksum offload");

#define PCH_GBE_DEFAULT_RX_CSUM		true	/* true or false */

/*
 * XsumTX - Transmit Checksum Offload Enable/Disable
 * @Valid Range: 0, 1
 *   - 0: disables all checksum offload
 *   - 1: enables transmit IP/TCP/UDP checksum offload
 * @Default Value: PCH_GBE_DEFAULT_TX_CSUM
 */
static int XsumTX = OPTION_UNSET;
module_param(XsumTX, int, 0);
MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");

#define PCH_GBE_DEFAULT_TX_CSUM		true	/* true or false */
137
/**
 * struct pch_gbe_option - Descriptor for one validatable driver option
 * @type: Discriminates how @arg is interpreted (enable/range/list)
 * @name: Option name used in diagnostic messages
 * @err:  Error text associated with the option
 * @def:  Default applied when the module parameter is OPTION_UNSET
 * @arg:  Range bounds (.r) or valid-value list (.l), selected by @type
 *
 * (The previous comment here described a flow-control function and had
 * been pasted from elsewhere; it did not document this struct.)
 */
struct pch_gbe_option {
	enum { enable_option, range_option, list_option } type;
	char *name;
	char *err;
	int def;
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;
			const struct pch_gbe_opt_list { int i; char *str; } *p;
		} l;
	} arg;
};
161
162static const struct pch_gbe_opt_list speed_list[] = {
163 { 0, "" },
164 { SPEED_10, "" },
165 { SPEED_100, "" },
166 { SPEED_1000, "" }
167};
168
169static const struct pch_gbe_opt_list dplx_list[] = {
170 { 0, "" },
171 { HALF_DUPLEX, "" },
172 { FULL_DUPLEX, "" }
173};
174
/* legal AutoNeg advertising bitmasks; each entry's string is logged when
 * that value is selected.  The AA macro is defined mid-declaration (between
 * the declarator and its initializer) purely to shorten the table text. */
static const struct pch_gbe_opt_list an_list[] =
	#define AA "AutoNeg advertising "
	{{ 0x01, AA "10/HD" },
	 { 0x02, AA "10/FD" },
	 { 0x03, AA "10/FD, 10/HD" },
	 { 0x04, AA "100/HD" },
	 { 0x05, AA "100/HD, 10/HD" },
	 { 0x06, AA "100/HD, 10/FD" },
	 { 0x07, AA "100/HD, 10/FD, 10/HD" },
	 { 0x08, AA "100/FD" },
	 { 0x09, AA "100/FD, 10/HD" },
	 { 0x0a, AA "100/FD, 10/FD" },
	 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
	 { 0x0c, AA "100/FD, 100/HD" },
	 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
	 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
	 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
	 { 0x20, AA "1000/FD" },
	 { 0x21, AA "1000/FD, 10/HD" },
	 { 0x22, AA "1000/FD, 10/FD" },
	 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
	 { 0x24, AA "1000/FD, 100/HD" },
	 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
	 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
	 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
	 { 0x28, AA "1000/FD, 100/FD" },
	 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
	 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
	 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
	 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
	 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
	 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
	 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }
};
209
/* legal values for the FlowControl parameter */
static const struct pch_gbe_opt_list fc_list[] = {
	{ PCH_GBE_FC_NONE, "Flow Control Disabled" },
	{ PCH_GBE_FC_RX_PAUSE, "Flow Control Receive Only" },
	{ PCH_GBE_FC_TX_PAUSE, "Flow Control Transmit Only" },
	{ PCH_GBE_FC_FULL, "Flow Control Enabled" }
};
216
217/**
218 * pch_gbe_validate_option - Validate option
219 * @value: value
220 * @opt: option
221 * @adapter: Board private structure
222 * Returns
223 * 0: Successful.
224 * Negative value: Failed.
225 */
226static int pch_gbe_validate_option(int *value,
227 const struct pch_gbe_option *opt,
228 struct pch_gbe_adapter *adapter)
229{
230 if (*value == OPTION_UNSET) {
231 *value = opt->def;
232 return 0;
233 }
234
235 switch (opt->type) {
236 case enable_option:
237 switch (*value) {
238 case OPTION_ENABLED:
239 pr_debug("%s Enabled\n", opt->name);
240 return 0;
241 case OPTION_DISABLED:
242 pr_debug("%s Disabled\n", opt->name);
243 return 0;
244 }
245 break;
246 case range_option:
247 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
248 pr_debug("%s set to %i\n", opt->name, *value);
249 return 0;
250 }
251 break;
252 case list_option: {
253 int i;
254 const struct pch_gbe_opt_list *ent;
255
256 for (i = 0; i < opt->arg.l.nr; i++) {
257 ent = &opt->arg.l.p[i];
258 if (*value == ent->i) {
259 if (ent->str[0] != '\0')
260 pr_debug("%s\n", ent->str);
261 return 0;
262 }
263 }
264 }
265 break;
266 default:
267 BUG();
268 }
269
270 pr_debug("Invalid %s value specified (%i) %s\n",
271 opt->name, *value, opt->err);
272 *value = opt->def;
273 return -1;
274}
275
/**
 * pch_gbe_check_copper_options - Range Checking for Link Options, Copper Version
 * @adapter:  Board private structure
 *
 * Validates the Speed, Duplex and AutoNeg module parameters against their
 * legal-value tables, then derives hw->mac.autoneg, link_speed, link_duplex
 * and hw->phy.autoneg_advertised from the (speed + duplex) combination.
 */
static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
{
	struct pch_gbe_hw *hw = &adapter->hw;
	int speed, dplx;

	{ /* Speed */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "Speed",
			.err  = "parameter ignored",
			.def  = 0,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(speed_list),
					 .p = speed_list } }
		};
		speed = Speed;
		pch_gbe_validate_option(&speed, &opt, adapter);
	}
	{ /* Duplex */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "Duplex",
			.err  = "parameter ignored",
			.def  = 0,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(dplx_list),
					 .p = dplx_list } }
		};
		dplx = Duplex;
		pch_gbe_validate_option(&dplx, &opt, adapter);
	}

	{ /* Autoneg */
		static const struct pch_gbe_option opt = {
			.type = list_option,
			.name = "AutoNeg",
			.err  = "parameter ignored",
			.def  = PCH_AUTONEG_ADVERTISE_DEFAULT,
			.arg  = { .l = { .nr = (int)ARRAY_SIZE(an_list),
					 .p = an_list} }
		};
		/* a forced speed or duplex overrides the advertising mask */
		if (speed || dplx) {
			pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
			hw->phy.autoneg_advertised = opt.def;
		} else {
			hw->phy.autoneg_advertised = AutoNeg;
			pch_gbe_validate_option(
				(int *)(&hw->phy.autoneg_advertised),
				&opt, adapter);
		}
	}

	/* SPEED_10/100/1000 and HALF/FULL_DUPLEX values do not collide, so
	 * the sum uniquely encodes the (speed, duplex) pair */
	switch (speed + dplx) {
	case 0:
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		/* NOTE(review): speed + dplx == 0 with the non-negative
		 * table values above implies speed == dplx == 0, so this
		 * condition looks unreachable — confirm before relying on
		 * the log message ever appearing. */
		if ((speed || dplx))
			pr_debug("Speed and duplex autonegotiation enabled\n");
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case HALF_DUPLEX:
		pr_debug("Half Duplex specified without Speed\n");
		pr_debug("Using Autonegotiation at Half Duplex only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
						PHY_ADVERTISE_100_HALF;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case FULL_DUPLEX:
		pr_debug("Full Duplex specified without Speed\n");
		pr_debug("Using Autonegotiation at Full Duplex only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_FULL |
						PHY_ADVERTISE_100_FULL |
						PHY_ADVERTISE_1000_FULL;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	case SPEED_10:
		pr_debug("10 Mbps Speed specified without Duplex\n");
		pr_debug("Using Autonegotiation at 10 Mbps only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_10_HALF |
						PHY_ADVERTISE_10_FULL;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_10 + HALF_DUPLEX:
		pr_debug("Forcing to 10 Mbps Half Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_10 + FULL_DUPLEX:
		pr_debug("Forcing to 10 Mbps Full Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_10;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	case SPEED_100:
		pr_debug("100 Mbps Speed specified without Duplex\n");
		pr_debug("Using Autonegotiation at 100 Mbps only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_100_HALF |
						PHY_ADVERTISE_100_FULL;
		hw->mac.link_speed = SPEED_100;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_100 + HALF_DUPLEX:
		pr_debug("Forcing to 100 Mbps Half Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_100;
		hw->mac.link_duplex = DUPLEX_HALF;
		break;
	case SPEED_100 + FULL_DUPLEX:
		pr_debug("Forcing to 100 Mbps Full Duplex\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 0;
		hw->phy.autoneg_advertised = 0;
		hw->mac.link_speed = SPEED_100;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	case SPEED_1000:
		pr_debug("1000 Mbps Speed specified without Duplex\n");
		/* 1000 Mbps is full-duplex only on this hardware */
		goto full_duplex_only;
	case SPEED_1000 + HALF_DUPLEX:
		pr_debug("Half Duplex is not supported at 1000 Mbps\n");
		/* fall through */
	case SPEED_1000 + FULL_DUPLEX:
full_duplex_only:
		pr_debug("Using Autonegotiation at 1000 Mbps Full Duplex only\n");
		hw->mac.autoneg = hw->mac.fc_autoneg = 1;
		hw->phy.autoneg_advertised = PHY_ADVERTISE_1000_FULL;
		hw->mac.link_speed = SPEED_1000;
		hw->mac.link_duplex = DUPLEX_FULL;
		break;
	default:
		/* every legal list value is covered above */
		BUG();
	}
}
421
422/**
423 * pch_gbe_check_options - Range Checking for Command Line Parameters
424 * @adapter: Board private structure
425 */
426void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
427{
428 struct pch_gbe_hw *hw = &adapter->hw;
429
430 { /* Transmit Descriptor Count */
431 static const struct pch_gbe_option opt = {
432 .type = range_option,
433 .name = "Transmit Descriptors",
434 .err = "using default of "
435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
436 .def = PCH_GBE_DEFAULT_TXD,
437 .arg = { .r = { .min = PCH_GBE_MIN_TXD } },
438 .arg = { .r = { .max = PCH_GBE_MAX_TXD } }
439 };
440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
441 tx_ring->count = TxDescriptors;
442 pch_gbe_validate_option(&tx_ring->count, &opt, adapter);
443 tx_ring->count = roundup(tx_ring->count,
444 PCH_GBE_TX_DESC_MULTIPLE);
445 }
446 { /* Receive Descriptor Count */
447 static const struct pch_gbe_option opt = {
448 .type = range_option,
449 .name = "Receive Descriptors",
450 .err = "using default of "
451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
452 .def = PCH_GBE_DEFAULT_RXD,
453 .arg = { .r = { .min = PCH_GBE_MIN_RXD } },
454 .arg = { .r = { .max = PCH_GBE_MAX_RXD } }
455 };
456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
457 rx_ring->count = RxDescriptors;
458 pch_gbe_validate_option(&rx_ring->count, &opt, adapter);
459 rx_ring->count = roundup(rx_ring->count,
460 PCH_GBE_RX_DESC_MULTIPLE);
461 }
462 { /* Checksum Offload Enable/Disable */
463 static const struct pch_gbe_option opt = {
464 .type = enable_option,
465 .name = "Checksum Offload",
466 .err = "defaulting to Enabled",
467 .def = PCH_GBE_DEFAULT_RX_CSUM
468 };
469 adapter->rx_csum = XsumRX;
470 pch_gbe_validate_option((int *)(&adapter->rx_csum),
471 &opt, adapter);
472 }
473 { /* Checksum Offload Enable/Disable */
474 static const struct pch_gbe_option opt = {
475 .type = enable_option,
476 .name = "Checksum Offload",
477 .err = "defaulting to Enabled",
478 .def = PCH_GBE_DEFAULT_TX_CSUM
479 };
480 adapter->tx_csum = XsumTX;
481 pch_gbe_validate_option((int *)(&adapter->tx_csum),
482 &opt, adapter);
483 }
484 { /* Flow Control */
485 static const struct pch_gbe_option opt = {
486 .type = list_option,
487 .name = "Flow Control",
488 .err = "reading default settings from EEPROM",
489 .def = PCH_GBE_FC_DEFAULT,
490 .arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
491 .p = fc_list } }
492 };
493 hw->mac.fc = FlowControl;
494 pch_gbe_validate_option((int *)(&hw->mac.fc),
495 &opt, adapter);
496 }
497
498 pch_gbe_check_copper_options(adapter);
499}
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.c b/drivers/net/pch_gbe/pch_gbe_phy.c
new file mode 100644
index 000000000000..923a687acd30
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_phy.c
@@ -0,0 +1,274 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include "pch_gbe.h"
22#include "pch_gbe_phy.h"
23
24#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
25
26/* PHY 1000 MII Register/Bit Definitions */
27/* PHY Registers defined by IEEE */
28#define PHY_CONTROL 0x00 /* Control Register */
29#define PHY_STATUS 0x01 /* Status Regiser */
30#define PHY_ID1 0x02 /* Phy Id Register (word 1) */
31#define PHY_ID2 0x03 /* Phy Id Register (word 2) */
32#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
33#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
34#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */
35#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
36#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
37#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */
38#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */
39#define PHY_EXT_STATUS 0x0F /* Extended Status Register */
40#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */
41#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */
42#define PHY_LED_CONTROL 0x18 /* LED Control Register */
43#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */
44
45/* PHY Control Register */
46#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
47#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
48#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
49#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
50#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
51#define MII_CR_POWER_DOWN 0x0800 /* Power down */
52#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
53#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
54#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
55#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
56#define MII_CR_SPEED_1000 0x0040
57#define MII_CR_SPEED_100 0x2000
58#define MII_CR_SPEED_10 0x0000
59
60/* PHY Status Register */
61#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
62#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
63#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
64#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
65#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
66#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
67#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
68#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
69#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
70#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
71#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
72#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
73#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
74#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
75#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
76
77/* Phy Id Register (word 2) */
78#define PHY_REVISION_MASK 0x000F
79
80/* PHY Specific Control Register */
81#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800
82
83
84/* Default value of PHY register */
85#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */
86#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */
87#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */
88#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */
89#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */
90
91/**
92 * pch_gbe_phy_get_id - Retrieve the PHY ID and revision
93 * @hw: Pointer to the HW structure
94 * Returns
95 * 0: Successful.
96 * Negative value: Failed.
97 */
98s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
99{
100 struct pch_gbe_phy_info *phy = &hw->phy;
101 s32 ret;
102 u16 phy_id1;
103 u16 phy_id2;
104
105 ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1);
106 if (ret)
107 return ret;
108 ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2);
109 if (ret)
110 return ret;
111 /*
112 * PHY_ID1: [bit15-0:ID(21-6)]
113 * PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision]
114 */
115 phy->id = (u32)phy_id1;
116 phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
117 phy->revision = (u32) (phy_id2 & 0x000F);
118 pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n",
119 phy->id, phy->revision);
120 return 0;
121}
122
123/**
124 * pch_gbe_phy_read_reg_miic - Read MII control register
125 * @hw: Pointer to the HW structure
126 * @offset: Register offset to be read
127 * @data: Pointer to the read data
128 * Returns
129 * 0: Successful.
130 * -EINVAL: Invalid argument.
131 */
132s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data)
133{
134 struct pch_gbe_phy_info *phy = &hw->phy;
135
136 if (offset > PHY_MAX_REG_ADDRESS) {
137 pr_err("PHY Address %d is out of range\n", offset);
138 return -EINVAL;
139 }
140 *data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
141 offset, (u16)0);
142 return 0;
143}
144
145/**
146 * pch_gbe_phy_write_reg_miic - Write MII control register
147 * @hw: Pointer to the HW structure
148 * @offset: Register offset to be read
149 * @data: data to write to register at offset
150 * Returns
151 * 0: Successful.
152 * -EINVAL: Invalid argument.
153 */
154s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
155{
156 struct pch_gbe_phy_info *phy = &hw->phy;
157
158 if (offset > PHY_MAX_REG_ADDRESS) {
159 pr_err("PHY Address %d is out of range\n", offset);
160 return -EINVAL;
161 }
162 pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
163 offset, data);
164 return 0;
165}
166
167/**
168 * pch_gbe_phy_sw_reset - PHY software reset
169 * @hw: Pointer to the HW structure
170 */
171void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
172{
173 u16 phy_ctrl;
174
175 pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &phy_ctrl);
176 phy_ctrl |= MII_CR_RESET;
177 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, phy_ctrl);
178 udelay(1);
179}
180
/**
 * pch_gbe_phy_hw_reset - PHY hardware reset
 * @hw: Pointer to the HW structure
 *
 * Restores the main PHY registers to their documented power-on defaults
 * (control, autoneg advertisement, next-page TX, 1000Base-T control and
 * PHY-specific control).
 */
void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw)
{
	pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT);
	pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV,
					PHY_AUTONEG_ADV_DEFAULT);
	pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX,
					PHY_NEXT_PAGE_TX_DEFAULT);
	pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT);
	pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL,
					PHY_PHYSP_CONTROL_DEFAULT);
}
196
197/**
198 * pch_gbe_phy_power_up - restore link in case the phy was powered down
199 * @hw: Pointer to the HW structure
200 */
201void pch_gbe_phy_power_up(struct pch_gbe_hw *hw)
202{
203 u16 mii_reg;
204
205 mii_reg = 0;
206 /* Just clear the power down bit to wake the phy back up */
207 /* according to the manual, the phy will retain its
208 * settings across a power-down/up cycle */
209 pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
210 mii_reg &= ~MII_CR_POWER_DOWN;
211 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
212}
213
214/**
215 * pch_gbe_phy_power_down - Power down PHY
216 * @hw: Pointer to the HW structure
217 */
218void pch_gbe_phy_power_down(struct pch_gbe_hw *hw)
219{
220 u16 mii_reg;
221
222 mii_reg = 0;
223 /* Power down the PHY so no link is implied when interface is down *
224 * The PHY cannot be powered down if any of the following is TRUE *
225 * (a) WoL is enabled
226 * (b) AMT is active
227 */
228 pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &mii_reg);
229 mii_reg |= MII_CR_POWER_DOWN;
230 pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, mii_reg);
231 mdelay(1);
232}
233
/**
 * pch_gbe_phy_set_rgmii - RGMII interface setting
 * @hw: Pointer to the HW structure
 *
 * Only a software reset is required to (re)configure the PHY for RGMII.
 * The 'inline' qualifier was removed: the function has an external
 * prototype in pch_gbe_phy.h, and a plain C99 'inline' definition in a
 * .c file emits no external symbol, breaking the link for out-of-file
 * callers.
 */
void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
{
	pch_gbe_phy_sw_reset(hw);
}
242
/**
 * pch_gbe_phy_init_setting - PHY initial setting
 * @hw: Pointer to the HW structure
 *
 * Reads the current MII settings, overrides speed/duplex/advertising/
 * autoneg with the values stored in @hw, resets the PHY and pushes the
 * new settings, then enables CRS assertion on transmit.
 */
void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter;
	struct ethtool_cmd     cmd;
	int ret;
	u16 mii_reg;

	/* recover the adapter embedding this hw structure */
	adapter = container_of(hw, struct pch_gbe_adapter, hw);
	ret = mii_ethtool_gset(&adapter->mii, &cmd);
	/* NOTE(review): errors from gset/sset are logged but NOT
	 * propagated; initialization continues regardless — confirm this
	 * best-effort behavior is intended. */
	if (ret)
		pr_err("Error: mii_ethtool_gset\n");

	cmd.speed = hw->mac.link_speed;
	cmd.duplex = hw->mac.link_duplex;
	cmd.advertising = hw->phy.autoneg_advertised;
	cmd.autoneg = hw->mac.autoneg;
	pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
	ret = mii_ethtool_sset(&adapter->mii, &cmd);
	if (ret)
		pr_err("Error: mii_ethtool_sset\n");

	pch_gbe_phy_sw_reset(hw);

	/* assert CRS on transmit (PHY-specific control register) */
	pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
	mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
	pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);

}
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.h b/drivers/net/pch_gbe/pch_gbe_phy.h
new file mode 100644
index 000000000000..03264dc7b5ec
--- /dev/null
+++ b/drivers/net/pch_gbe/pch_gbe_phy.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
4 *
5 * This code was derived from the Intel e1000e Linux driver.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
19 */
20#ifndef _PCH_GBE_PHY_H_
21#define _PCH_GBE_PHY_H_
22
23#define PCH_GBE_PHY_REGS_LEN 32
24#define PCH_GBE_PHY_RESET_DELAY_US 10
25#define PCH_GBE_MAC_IFOP_RGMII
26
27s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw);
28s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data);
29s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data);
30void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw);
31void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw);
32void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
33void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
34void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
35void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
36
37#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 7f2baf5eae26..35562a395770 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -266,7 +266,7 @@ static int tc589_config(struct pcmcia_device *link)
266 __be16 *phys_addr; 266 __be16 *phys_addr;
267 int ret, i, j, multi = 0, fifo; 267 int ret, i, j, multi = 0, fifo;
268 unsigned int ioaddr; 268 unsigned int ioaddr;
269 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 269 static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
270 u8 *buf; 270 u8 *buf;
271 size_t len; 271 size_t len;
272 272
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 89cf63bb8c91..c1d8ce9e4a6c 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -521,7 +521,7 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
521 spin_unlock_irqrestore(&lp->bank_lock, flags); 521 spin_unlock_irqrestore(&lp->bank_lock, flags);
522 break; 522 break;
523 } 523 }
524 return (data & 0xFF); 524 return data & 0xFF;
525} /* mace_read */ 525} /* mace_read */
526 526
527/* ---------------------------------------------------------------------------- 527/* ----------------------------------------------------------------------------
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index e180832c278f..c94311aed1ab 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -506,7 +506,8 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
506 unsigned int vcc, 506 unsigned int vcc,
507 void *priv_data) 507 void *priv_data)
508{ 508{
509 int *has_shmem = priv_data; 509 int *priv = priv_data;
510 int try = (*priv & 0x1);
510 int i; 511 int i;
511 cistpl_io_t *io = &cfg->io; 512 cistpl_io_t *io = &cfg->io;
512 513
@@ -523,77 +524,103 @@ static int pcnet_confcheck(struct pcmcia_device *p_dev,
523 i = p_dev->resource[1]->end = 0; 524 i = p_dev->resource[1]->end = 0;
524 } 525 }
525 526
526 *has_shmem = ((cfg->mem.nwin == 1) && 527 *priv &= ((cfg->mem.nwin == 1) &&
527 (cfg->mem.win[0].len >= 0x4000)); 528 (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10;
529
528 p_dev->resource[0]->start = io->win[i].base; 530 p_dev->resource[0]->start = io->win[i].base;
529 p_dev->resource[0]->end = io->win[i].len; 531 p_dev->resource[0]->end = io->win[i].len;
530 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; 532 if (!try)
533 p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK;
534 else
535 p_dev->io_lines = 16;
531 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) 536 if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32)
532 return try_io_port(p_dev); 537 return try_io_port(p_dev);
533 538
534 return 0; 539 return -EINVAL;
540}
541
542static hw_info_t *pcnet_try_config(struct pcmcia_device *link,
543 int *has_shmem, int try)
544{
545 struct net_device *dev = link->priv;
546 hw_info_t *local_hw_info;
547 pcnet_dev_t *info = PRIV(dev);
548 int priv = try;
549 int ret;
550
551 ret = pcmcia_loop_config(link, pcnet_confcheck, &priv);
552 if (ret) {
553 dev_warn(&link->dev, "no useable port range found\n");
554 return NULL;
555 }
556 *has_shmem = (priv & 0x10);
557
558 if (!link->irq)
559 return NULL;
560
561 if (resource_size(link->resource[1]) == 8) {
562 link->conf.Attributes |= CONF_ENABLE_SPKR;
563 link->conf.Status = CCSR_AUDIO_ENA;
564 }
565 if ((link->manf_id == MANFID_IBM) &&
566 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
567 link->conf.ConfigIndex |= 0x10;
568
569 ret = pcmcia_request_configuration(link, &link->conf);
570 if (ret)
571 return NULL;
572
573 dev->irq = link->irq;
574 dev->base_addr = link->resource[0]->start;
575
576 if (info->flags & HAS_MISC_REG) {
577 if ((if_port == 1) || (if_port == 2))
578 dev->if_port = if_port;
579 else
580 dev_notice(&link->dev, "invalid if_port requested\n");
581 } else
582 dev->if_port = 0;
583
584 if ((link->conf.ConfigBase == 0x03c0) &&
585 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
586 dev_info(&link->dev,
587 "this is an AX88190 card - use axnet_cs instead.\n");
588 return NULL;
589 }
590
591 local_hw_info = get_hwinfo(link);
592 if (!local_hw_info)
593 local_hw_info = get_prom(link);
594 if (!local_hw_info)
595 local_hw_info = get_dl10019(link);
596 if (!local_hw_info)
597 local_hw_info = get_ax88190(link);
598 if (!local_hw_info)
599 local_hw_info = get_hwired(link);
600
601 return local_hw_info;
535} 602}
536 603
537static int pcnet_config(struct pcmcia_device *link) 604static int pcnet_config(struct pcmcia_device *link)
538{ 605{
539 struct net_device *dev = link->priv; 606 struct net_device *dev = link->priv;
540 pcnet_dev_t *info = PRIV(dev); 607 pcnet_dev_t *info = PRIV(dev);
541 int ret, start_pg, stop_pg, cm_offset; 608 int start_pg, stop_pg, cm_offset;
542 int has_shmem = 0; 609 int has_shmem = 0;
543 hw_info_t *local_hw_info; 610 hw_info_t *local_hw_info;
544 611
545 dev_dbg(&link->dev, "pcnet_config\n"); 612 dev_dbg(&link->dev, "pcnet_config\n");
546 613
547 ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); 614 local_hw_info = pcnet_try_config(link, &has_shmem, 0);
548 if (ret) 615 if (!local_hw_info) {
549 goto failed; 616 /* check whether forcing io_lines to 16 helps... */
550 617 pcmcia_disable_device(link);
551 if (!link->irq) 618 local_hw_info = pcnet_try_config(link, &has_shmem, 1);
552 goto failed; 619 if (local_hw_info == NULL) {
553 620 dev_notice(&link->dev, "unable to read hardware net"
554 if (resource_size(link->resource[1]) == 8) { 621 " address for io base %#3lx\n", dev->base_addr);
555 link->conf.Attributes |= CONF_ENABLE_SPKR; 622 goto failed;
556 link->conf.Status = CCSR_AUDIO_ENA; 623 }
557 }
558 if ((link->manf_id == MANFID_IBM) &&
559 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
560 link->conf.ConfigIndex |= 0x10;
561
562 ret = pcmcia_request_configuration(link, &link->conf);
563 if (ret)
564 goto failed;
565 dev->irq = link->irq;
566 dev->base_addr = link->resource[0]->start;
567 if (info->flags & HAS_MISC_REG) {
568 if ((if_port == 1) || (if_port == 2))
569 dev->if_port = if_port;
570 else
571 pr_notice("invalid if_port requested\n");
572 } else {
573 dev->if_port = 0;
574 }
575
576 if ((link->conf.ConfigBase == 0x03c0) &&
577 (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
578 pr_notice("this is an AX88190 card!\n");
579 pr_notice("use axnet_cs instead.\n");
580 goto failed;
581 }
582
583 local_hw_info = get_hwinfo(link);
584 if (local_hw_info == NULL)
585 local_hw_info = get_prom(link);
586 if (local_hw_info == NULL)
587 local_hw_info = get_dl10019(link);
588 if (local_hw_info == NULL)
589 local_hw_info = get_ax88190(link);
590 if (local_hw_info == NULL)
591 local_hw_info = get_hwired(link);
592
593 if (local_hw_info == NULL) {
594 pr_notice("unable to read hardware net address for io base %#3lx\n",
595 dev->base_addr);
596 goto failed;
597 } 624 }
598 625
599 info->flags = local_hw_info->flags; 626 info->flags = local_hw_info->flags;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 3d1c549b7038..7204a4b5529b 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -815,7 +815,7 @@ static int check_sig(struct pcmcia_device *link)
815 ((s >> 8) != (s & 0xff))) { 815 ((s >> 8) != (s & 0xff))) {
816 SMC_SELECT_BANK(3); 816 SMC_SELECT_BANK(3);
817 s = inw(ioaddr + REVISION); 817 s = inw(ioaddr + REVISION);
818 return (s & 0xff); 818 return s & 0xff;
819 } 819 }
820 820
821 if (width) { 821 if (width) {
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c200c2821730..aee3bb0358bf 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -376,7 +376,7 @@ static void pcnet32_wio_reset(unsigned long addr)
376static int pcnet32_wio_check(unsigned long addr) 376static int pcnet32_wio_check(unsigned long addr)
377{ 377{
378 outw(88, addr + PCNET32_WIO_RAP); 378 outw(88, addr + PCNET32_WIO_RAP);
379 return (inw(addr + PCNET32_WIO_RAP) == 88); 379 return inw(addr + PCNET32_WIO_RAP) == 88;
380} 380}
381 381
382static struct pcnet32_access pcnet32_wio = { 382static struct pcnet32_access pcnet32_wio = {
@@ -431,7 +431,7 @@ static void pcnet32_dwio_reset(unsigned long addr)
431static int pcnet32_dwio_check(unsigned long addr) 431static int pcnet32_dwio_check(unsigned long addr)
432{ 432{
433 outl(88, addr + PCNET32_DWIO_RAP); 433 outl(88, addr + PCNET32_DWIO_RAP);
434 return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88); 434 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
435} 435}
436 436
437static struct pcnet32_access pcnet32_dwio = { 437static struct pcnet32_access pcnet32_dwio = {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index eb799b36c86a..cb3d13e4e074 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,7 +58,6 @@ config BROADCOM_PHY
58 58
59config BCM63XX_PHY 59config BCM63XX_PHY
60 tristate "Drivers for Broadcom 63xx SOCs internal PHY" 60 tristate "Drivers for Broadcom 63xx SOCs internal PHY"
61 depends on BCM63XX
62 ---help--- 61 ---help---
63 Currently supports the 6348 and 6358 PHYs. 62 Currently supports the 6348 and 6358 PHYs.
64 63
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index c12815679837..e16f98cb4f04 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -131,7 +131,7 @@ static void __exit bcm63xx_phy_exit(void)
131module_init(bcm63xx_phy_init); 131module_init(bcm63xx_phy_init);
132module_exit(bcm63xx_phy_exit); 132module_exit(bcm63xx_phy_exit);
133 133
134static struct mdio_device_id bcm63xx_tbl[] = { 134static struct mdio_device_id __maybe_unused bcm63xx_tbl[] = {
135 { 0x00406000, 0xfffffc00 }, 135 { 0x00406000, 0xfffffc00 },
136 { 0x002bdc00, 0xfffffc00 }, 136 { 0x002bdc00, 0xfffffc00 },
137 { } 137 { }
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 4accd83d3dfe..d84c4224dd12 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -930,7 +930,7 @@ static void __exit broadcom_exit(void)
930module_init(broadcom_init); 930module_init(broadcom_init);
931module_exit(broadcom_exit); 931module_exit(broadcom_exit);
932 932
933static struct mdio_device_id broadcom_tbl[] = { 933static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
934 { PHY_ID_BCM5411, 0xfffffff0 }, 934 { PHY_ID_BCM5411, 0xfffffff0 },
935 { PHY_ID_BCM5421, 0xfffffff0 }, 935 { PHY_ID_BCM5421, 0xfffffff0 },
936 { PHY_ID_BCM5461, 0xfffffff0 }, 936 { PHY_ID_BCM5461, 0xfffffff0 },
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 1a325d63756b..d28173161c21 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -159,7 +159,7 @@ static void __exit cicada_exit(void)
159module_init(cicada_init); 159module_init(cicada_init);
160module_exit(cicada_exit); 160module_exit(cicada_exit);
161 161
162static struct mdio_device_id cicada_tbl[] = { 162static struct mdio_device_id __maybe_unused cicada_tbl[] = {
163 { 0x000fc410, 0x000ffff0 }, 163 { 0x000fc410, 0x000ffff0 },
164 { 0x000fc440, 0x000fffc0 }, 164 { 0x000fc440, 0x000fffc0 },
165 { } 165 { }
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 29c17617a2ec..2f774acdb551 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -219,7 +219,7 @@ static void __exit davicom_exit(void)
219module_init(davicom_init); 219module_init(davicom_init);
220module_exit(davicom_exit); 220module_exit(davicom_exit);
221 221
222static struct mdio_device_id davicom_tbl[] = { 222static struct mdio_device_id __maybe_unused davicom_tbl[] = {
223 { 0x0181b880, 0x0ffffff0 }, 223 { 0x0181b880, 0x0ffffff0 },
224 { 0x0181b8a0, 0x0ffffff0 }, 224 { 0x0181b8a0, 0x0ffffff0 },
225 { 0x00181b80, 0x0ffffff0 }, 225 { 0x00181b80, 0x0ffffff0 },
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 13995f52d6af..a8eb19ec3183 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -111,7 +111,7 @@ static void __exit et1011c_exit(void)
111module_init(et1011c_init); 111module_init(et1011c_init);
112module_exit(et1011c_exit); 112module_exit(et1011c_exit);
113 113
114static struct mdio_device_id et1011c_tbl[] = { 114static struct mdio_device_id __maybe_unused et1011c_tbl[] = {
115 { 0x0282f014, 0xfffffff0 }, 115 { 0x0282f014, 0xfffffff0 },
116 { } 116 { }
117}; 117};
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 3f2583f18a39..c1d2d251fe8b 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -134,7 +134,7 @@ static void __exit ip175c_exit(void)
134module_init(ip175c_init); 134module_init(ip175c_init);
135module_exit(ip175c_exit); 135module_exit(ip175c_exit);
136 136
137static struct mdio_device_id icplus_tbl[] = { 137static struct mdio_device_id __maybe_unused icplus_tbl[] = {
138 { 0x02430d80, 0x0ffffff0 }, 138 { 0x02430d80, 0x0ffffff0 },
139 { } 139 { }
140}; 140};
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 29c39ff85de5..6f6e8b616a62 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -223,7 +223,7 @@ static void __exit lxt_exit(void)
223module_init(lxt_init); 223module_init(lxt_init);
224module_exit(lxt_exit); 224module_exit(lxt_exit);
225 225
226static struct mdio_device_id lxt_tbl[] = { 226static struct mdio_device_id __maybe_unused lxt_tbl[] = {
227 { 0x78100000, 0xfffffff0 }, 227 { 0x78100000, 0xfffffff0 },
228 { 0x001378e0, 0xfffffff0 }, 228 { 0x001378e0, 0xfffffff0 },
229 { 0x00137a10, 0xfffffff0 }, 229 { 0x00137a10, 0xfffffff0 },
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0101f2bdf400..e2afdce0a437 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -196,20 +196,27 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
196 MII_88E1121_PHY_MSCR_PAGE); 196 MII_88E1121_PHY_MSCR_PAGE);
197 if (err < 0) 197 if (err < 0)
198 return err; 198 return err;
199 mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
200 MII_88E1121_PHY_MSCR_DELAY_MASK;
201 199
202 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) 200 if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
203 mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | 201 (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
204 MII_88E1121_PHY_MSCR_TX_DELAY); 202 (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
205 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) 203 (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
206 mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
207 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
208 mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
209 204
210 err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); 205 mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
211 if (err < 0) 206 MII_88E1121_PHY_MSCR_DELAY_MASK;
212 return err; 207
208 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
209 mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
210 MII_88E1121_PHY_MSCR_TX_DELAY);
211 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
212 mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
213 else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
214 mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
215
216 err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
217 if (err < 0)
218 return err;
219 }
213 220
214 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 221 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
215 222
@@ -721,7 +728,7 @@ static void __exit marvell_exit(void)
721module_init(marvell_init); 728module_init(marvell_init);
722module_exit(marvell_exit); 729module_exit(marvell_exit);
723 730
724static struct mdio_device_id marvell_tbl[] = { 731static struct mdio_device_id __maybe_unused marvell_tbl[] = {
725 { 0x01410c60, 0xfffffff0 }, 732 { 0x01410c60, 0xfffffff0 },
726 { 0x01410c90, 0xfffffff0 }, 733 { 0x01410c90, 0xfffffff0 },
727 { 0x01410cc0, 0xfffffff0 }, 734 { 0x01410cc0, 0xfffffff0 },
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6a6b8199a0d6..6c58da2b882c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
308 * may call phy routines that try to grab the same lock, and that may 308 * may call phy routines that try to grab the same lock, and that may
309 * lead to a deadlock. 309 * lead to a deadlock.
310 */ 310 */
311 if (phydev->attached_dev) 311 if (phydev->attached_dev && phydev->adjust_link)
312 phy_stop_machine(phydev); 312 phy_stop_machine(phydev);
313 313
314 if (!mdio_bus_phy_may_suspend(phydev)) 314 if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
331 return ret; 331 return ret;
332 332
333no_resume: 333no_resume:
334 if (phydev->attached_dev) 334 if (phydev->attached_dev && phydev->adjust_link)
335 phy_start_machine(phydev, NULL); 335 phy_start_machine(phydev, NULL);
336 336
337 return 0; 337 return 0;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 8bb7db676a5c..0fd1678bc5a9 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -231,7 +231,7 @@ MODULE_DESCRIPTION("Micrel PHY driver");
231MODULE_AUTHOR("David J. Choi"); 231MODULE_AUTHOR("David J. Choi");
232MODULE_LICENSE("GPL"); 232MODULE_LICENSE("GPL");
233 233
234static struct mdio_device_id micrel_tbl[] = { 234static struct mdio_device_id __maybe_unused micrel_tbl[] = {
235 { PHY_ID_KSZ9021, 0x000fff10 }, 235 { PHY_ID_KSZ9021, 0x000fff10 },
236 { PHY_ID_KS8001, 0x00fffff0 }, 236 { PHY_ID_KS8001, 0x00fffff0 },
237 { PHY_ID_KS8737, 0x00fffff0 }, 237 { PHY_ID_KS8737, 0x00fffff0 },
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index a73ba0bcc0ce..0620ba963508 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -151,7 +151,7 @@ MODULE_LICENSE("GPL");
151module_init(ns_init); 151module_init(ns_init);
152module_exit(ns_exit); 152module_exit(ns_exit);
153 153
154static struct mdio_device_id ns_tbl[] = { 154static struct mdio_device_id __maybe_unused ns_tbl[] = {
155 { DP83865_PHY_ID, 0xfffffff0 }, 155 { DP83865_PHY_ID, 0xfffffff0 },
156 { } 156 { }
157}; 157};
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 6736b23f1b28..fe0d0a15d5e1 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -138,7 +138,7 @@ static void __exit qs6612_exit(void)
138module_init(qs6612_init); 138module_init(qs6612_init);
139module_exit(qs6612_exit); 139module_exit(qs6612_exit);
140 140
141static struct mdio_device_id qs6612_tbl[] = { 141static struct mdio_device_id __maybe_unused qs6612_tbl[] = {
142 { 0x00181440, 0xfffffff0 }, 142 { 0x00181440, 0xfffffff0 },
143 { } 143 { }
144}; 144};
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f567c0e1aaa1..a4eae750a414 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -79,7 +79,7 @@ static void __exit realtek_exit(void)
79module_init(realtek_init); 79module_init(realtek_init);
80module_exit(realtek_exit); 80module_exit(realtek_exit);
81 81
82static struct mdio_device_id realtek_tbl[] = { 82static struct mdio_device_id __maybe_unused realtek_tbl[] = {
83 { 0x001cc912, 0x001fffff }, 83 { 0x001cc912, 0x001fffff },
84 { } 84 { }
85}; 85};
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 78fa988256fc..342505c976d6 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -254,7 +254,7 @@ MODULE_LICENSE("GPL");
254module_init(smsc_init); 254module_init(smsc_init);
255module_exit(smsc_exit); 255module_exit(smsc_exit);
256 256
257static struct mdio_device_id smsc_tbl[] = { 257static struct mdio_device_id __maybe_unused smsc_tbl[] = {
258 { 0x0007c0a0, 0xfffffff0 }, 258 { 0x0007c0a0, 0xfffffff0 },
259 { 0x0007c0b0, 0xfffffff0 }, 259 { 0x0007c0b0, 0xfffffff0 },
260 { 0x0007c0c0, 0xfffffff0 }, 260 { 0x0007c0c0, 0xfffffff0 },
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 72290099e5e1..187a2fa814f2 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,7 +132,7 @@ static void __exit ste10Xp_exit(void)
132module_init(ste10Xp_init); 132module_init(ste10Xp_init);
133module_exit(ste10Xp_exit); 133module_exit(ste10Xp_exit);
134 134
135static struct mdio_device_id ste10Xp_tbl[] = { 135static struct mdio_device_id __maybe_unused ste10Xp_tbl[] = {
136 { STE101P_PHY_ID, 0xfffffff0 }, 136 { STE101P_PHY_ID, 0xfffffff0 },
137 { STE100P_PHY_ID, 0xffffffff }, 137 { STE100P_PHY_ID, 0xffffffff },
138 { } 138 { }
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 45cce50a2799..5d8f6e17bd55 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -192,7 +192,7 @@ static void __exit vsc82xx_exit(void)
192module_init(vsc82xx_init); 192module_init(vsc82xx_init);
193module_exit(vsc82xx_exit); 193module_exit(vsc82xx_exit);
194 194
195static struct mdio_device_id vitesse_tbl[] = { 195static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
196 { PHY_ID_VSC8244, 0x000fffc0 }, 196 { PHY_ID_VSC8244, 0x000fffc0 },
197 { PHY_ID_VSC8221, 0x000ffff0 }, 197 { PHY_ID_VSC8221, 0x000ffff0 },
198 { } 198 { }
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 7e82a82422cf..ca4df7f4cf21 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -995,8 +995,10 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
995static void 995static void
996plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth) 996plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
997{ 997{
998 const struct in_device *in_dev = dev->ip_ptr; 998 const struct in_device *in_dev;
999 999
1000 rcu_read_lock();
1001 in_dev = __in_dev_get_rcu(dev);
1000 if (in_dev) { 1002 if (in_dev) {
1001 /* Any address will do - we take the first */ 1003 /* Any address will do - we take the first */
1002 const struct in_ifaddr *ifa = in_dev->ifa_list; 1004 const struct in_ifaddr *ifa = in_dev->ifa_list;
@@ -1006,6 +1008,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
1006 memcpy(eth->h_dest+2, &ifa->ifa_address, 4); 1008 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1007 } 1009 }
1008 } 1010 }
1011 rcu_read_unlock();
1009} 1012}
1010 1013
1011static int 1014static int
@@ -1088,7 +1091,8 @@ plip_open(struct net_device *dev)
1088 when the device address isn't identical to the address of a 1091 when the device address isn't identical to the address of a
1089 received frame, the kernel incorrectly drops it). */ 1092 received frame, the kernel incorrectly drops it). */
1090 1093
1091 if ((in_dev=dev->ip_ptr) != NULL) { 1094 in_dev=__in_dev_get_rtnl(dev);
1095 if (in_dev) {
1092 /* Any address will do - we take the first. We already 1096 /* Any address will do - we take the first. We already
1093 have the first two bytes filled with 0xfc, from 1097 have the first two bytes filled with 0xfc, from
1094 plip_init_dev(). */ 1098 plip_init_dev(). */
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6695a51e09e9..866e221643ab 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1314 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
1315 i = 0; 1315 i = 0;
1316 list_for_each_entry(pch, &ppp->channels, clist) { 1316 list_for_each_entry(pch, &ppp->channels, clist) {
1317 navail += pch->avail = (pch->chan != NULL); 1317 if (pch->chan) {
1318 pch->speed = pch->chan->speed; 1318 pch->avail = 1;
1319 navail++;
1320 pch->speed = pch->chan->speed;
1321 } else {
1322 pch->avail = 0;
1323 }
1319 if (pch->avail) { 1324 if (pch->avail) {
1320 if (skb_queue_empty(&pch->file.xq) || 1325 if (skb_queue_empty(&pch->file.xq) ||
1321 !pch->had_frag) { 1326 !pch->had_frag) {
@@ -1542,9 +1547,11 @@ ppp_channel_push(struct channel *pch)
1542 * Receive-side routines. 1547 * Receive-side routines.
1543 */ 1548 */
1544 1549
1545/* misuse a few fields of the skb for MP reconstruction */ 1550struct ppp_mp_skb_parm {
1546#define sequence priority 1551 u32 sequence;
1547#define BEbits cb[0] 1552 u8 BEbits;
1553};
1554#define PPP_MP_CB(skb) ((struct ppp_mp_skb_parm *)((skb)->cb))
1548 1555
1549static inline void 1556static inline void
1550ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1557ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
@@ -1873,13 +1880,13 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1873 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5]; 1880 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
1874 mask = 0xffffff; 1881 mask = 0xffffff;
1875 } 1882 }
1876 skb->BEbits = skb->data[2]; 1883 PPP_MP_CB(skb)->BEbits = skb->data[2];
1877 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ 1884 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */
1878 1885
1879 /* 1886 /*
1880 * Do protocol ID decompression on the first fragment of each packet. 1887 * Do protocol ID decompression on the first fragment of each packet.
1881 */ 1888 */
1882 if ((skb->BEbits & B) && (skb->data[0] & 1)) 1889 if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
1883 *skb_push(skb, 1) = 0; 1890 *skb_push(skb, 1) = 0;
1884 1891
1885 /* 1892 /*
@@ -1891,7 +1898,7 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1891 seq += mask + 1; 1898 seq += mask + 1;
1892 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) 1899 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
1893 seq -= mask + 1; /* should never happen */ 1900 seq -= mask + 1; /* should never happen */
1894 skb->sequence = seq; 1901 PPP_MP_CB(skb)->sequence = seq;
1895 pch->lastseq = seq; 1902 pch->lastseq = seq;
1896 1903
1897 /* 1904 /*
@@ -1927,8 +1934,8 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1927 before the start of the queue. */ 1934 before the start of the queue. */
1928 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { 1935 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1929 struct sk_buff *mskb = skb_peek(&ppp->mrq); 1936 struct sk_buff *mskb = skb_peek(&ppp->mrq);
1930 if (seq_before(ppp->minseq, mskb->sequence)) 1937 if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence))
1931 ppp->minseq = mskb->sequence; 1938 ppp->minseq = PPP_MP_CB(mskb)->sequence;
1932 } 1939 }
1933 1940
1934 /* Pull completed packets off the queue and receive them. */ 1941 /* Pull completed packets off the queue and receive them. */
@@ -1958,12 +1965,12 @@ ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
1958{ 1965{
1959 struct sk_buff *p; 1966 struct sk_buff *p;
1960 struct sk_buff_head *list = &ppp->mrq; 1967 struct sk_buff_head *list = &ppp->mrq;
1961 u32 seq = skb->sequence; 1968 u32 seq = PPP_MP_CB(skb)->sequence;
1962 1969
1963 /* N.B. we don't need to lock the list lock because we have the 1970 /* N.B. we don't need to lock the list lock because we have the
1964 ppp unit receive-side lock. */ 1971 ppp unit receive-side lock. */
1965 skb_queue_walk(list, p) { 1972 skb_queue_walk(list, p) {
1966 if (seq_before(seq, p->sequence)) 1973 if (seq_before(seq, PPP_MP_CB(p)->sequence))
1967 break; 1974 break;
1968 } 1975 }
1969 __skb_queue_before(list, p, skb); 1976 __skb_queue_before(list, p, skb);
@@ -1992,22 +1999,22 @@ ppp_mp_reconstruct(struct ppp *ppp)
1992 tail = NULL; 1999 tail = NULL;
1993 for (p = head; p != (struct sk_buff *) list; p = next) { 2000 for (p = head; p != (struct sk_buff *) list; p = next) {
1994 next = p->next; 2001 next = p->next;
1995 if (seq_before(p->sequence, seq)) { 2002 if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
1996 /* this can't happen, anyway ignore the skb */ 2003 /* this can't happen, anyway ignore the skb */
1997 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", 2004 printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
1998 p->sequence, seq); 2005 PPP_MP_CB(p)->sequence, seq);
1999 head = next; 2006 head = next;
2000 continue; 2007 continue;
2001 } 2008 }
2002 if (p->sequence != seq) { 2009 if (PPP_MP_CB(p)->sequence != seq) {
2003 /* Fragment `seq' is missing. If it is after 2010 /* Fragment `seq' is missing. If it is after
2004 minseq, it might arrive later, so stop here. */ 2011 minseq, it might arrive later, so stop here. */
2005 if (seq_after(seq, minseq)) 2012 if (seq_after(seq, minseq))
2006 break; 2013 break;
2007 /* Fragment `seq' is lost, keep going. */ 2014 /* Fragment `seq' is lost, keep going. */
2008 lost = 1; 2015 lost = 1;
2009 seq = seq_before(minseq, p->sequence)? 2016 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
2010 minseq + 1: p->sequence; 2017 minseq + 1: PPP_MP_CB(p)->sequence;
2011 next = p; 2018 next = p;
2012 continue; 2019 continue;
2013 } 2020 }
@@ -2021,7 +2028,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2021 */ 2028 */
2022 2029
2023 /* B bit set indicates this fragment starts a packet */ 2030 /* B bit set indicates this fragment starts a packet */
2024 if (p->BEbits & B) { 2031 if (PPP_MP_CB(p)->BEbits & B) {
2025 head = p; 2032 head = p;
2026 lost = 0; 2033 lost = 0;
2027 len = 0; 2034 len = 0;
@@ -2030,7 +2037,8 @@ ppp_mp_reconstruct(struct ppp *ppp)
2030 len += p->len; 2037 len += p->len;
2031 2038
2032 /* Got a complete packet yet? */ 2039 /* Got a complete packet yet? */
2033 if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) { 2040 if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) &&
2041 (PPP_MP_CB(head)->BEbits & B)) {
2034 if (len > ppp->mrru + 2) { 2042 if (len > ppp->mrru + 2) {
2035 ++ppp->dev->stats.rx_length_errors; 2043 ++ppp->dev->stats.rx_length_errors;
2036 printk(KERN_DEBUG "PPP: reconstructed packet" 2044 printk(KERN_DEBUG "PPP: reconstructed packet"
@@ -2056,7 +2064,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2056 * and we haven't found a complete valid packet yet, 2064 * and we haven't found a complete valid packet yet,
2057 * we can discard up to and including this fragment. 2065 * we can discard up to and including this fragment.
2058 */ 2066 */
2059 if (p->BEbits & E) 2067 if (PPP_MP_CB(p)->BEbits & E)
2060 head = next; 2068 head = next;
2061 2069
2062 ++seq; 2070 ++seq;
@@ -2066,10 +2074,11 @@ ppp_mp_reconstruct(struct ppp *ppp)
2066 if (tail != NULL) { 2074 if (tail != NULL) {
2067 /* If we have discarded any fragments, 2075 /* If we have discarded any fragments,
2068 signal a receive error. */ 2076 signal a receive error. */
2069 if (head->sequence != ppp->nextseq) { 2077 if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
2070 if (ppp->debug & 1) 2078 if (ppp->debug & 1)
2071 printk(KERN_DEBUG " missed pkts %u..%u\n", 2079 printk(KERN_DEBUG " missed pkts %u..%u\n",
2072 ppp->nextseq, head->sequence-1); 2080 ppp->nextseq,
2081 PPP_MP_CB(head)->sequence-1);
2073 ++ppp->dev->stats.rx_dropped; 2082 ++ppp->dev->stats.rx_dropped;
2074 ppp_receive_error(ppp); 2083 ppp_receive_error(ppp);
2075 } 2084 }
@@ -2078,7 +2087,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
2078 /* copy to a single skb */ 2087 /* copy to a single skb */
2079 for (p = head; p != tail->next; p = p->next) 2088 for (p = head; p != tail->next; p = p->next)
2080 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); 2089 skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
2081 ppp->nextseq = tail->sequence + 1; 2090 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
2082 head = tail->next; 2091 head = tail->next;
2083 } 2092 }
2084 2093
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index c07de359dc07..d72fb0519a2a 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1124,7 +1124,7 @@ static const struct proto_ops pppoe_ops = {
1124 .ioctl = pppox_ioctl, 1124 .ioctl = pppox_ioctl,
1125}; 1125};
1126 1126
1127static struct pppox_proto pppoe_proto = { 1127static const struct pppox_proto pppoe_proto = {
1128 .create = pppoe_create, 1128 .create = pppoe_create,
1129 .ioctl = pppoe_ioctl, 1129 .ioctl = pppoe_ioctl,
1130 .owner = THIS_MODULE, 1130 .owner = THIS_MODULE,
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index d4191ef9cad1..8c0d170dabcd 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -36,9 +36,9 @@
36 36
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39static struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1]; 39static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1];
40 40
41int register_pppox_proto(int proto_num, struct pppox_proto *pp) 41int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
42{ 42{
43 if (proto_num < 0 || proto_num > PX_MAX_PROTO) 43 if (proto_num < 0 || proto_num > PX_MAX_PROTO)
44 return -EINVAL; 44 return -EINVAL;
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 761f0eced724..ccbc91326bfa 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -53,7 +53,7 @@ static struct pppox_sock **callid_sock;
53static DEFINE_SPINLOCK(chan_lock); 53static DEFINE_SPINLOCK(chan_lock);
54 54
55static struct proto pptp_sk_proto __read_mostly; 55static struct proto pptp_sk_proto __read_mostly;
56static struct ppp_channel_ops pptp_chan_ops; 56static const struct ppp_channel_ops pptp_chan_ops;
57static const struct proto_ops pptp_ops; 57static const struct proto_ops pptp_ops;
58 58
59#define PPP_LCP_ECHOREQ 0x09 59#define PPP_LCP_ECHOREQ 0x09
@@ -628,7 +628,7 @@ static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
628 return err; 628 return err;
629} 629}
630 630
631static struct ppp_channel_ops pptp_chan_ops = { 631static const struct ppp_channel_ops pptp_chan_ops = {
632 .start_xmit = pptp_xmit, 632 .start_xmit = pptp_xmit,
633 .ioctl = pptp_ppp_ioctl, 633 .ioctl = pptp_ppp_ioctl,
634}; 634};
@@ -659,12 +659,12 @@ static const struct proto_ops pptp_ops = {
659 .ioctl = pppox_ioctl, 659 .ioctl = pppox_ioctl,
660}; 660};
661 661
662static struct pppox_proto pppox_pptp_proto = { 662static const struct pppox_proto pppox_pptp_proto = {
663 .create = pptp_create, 663 .create = pptp_create,
664 .owner = THIS_MODULE, 664 .owner = THIS_MODULE,
665}; 665};
666 666
667static struct gre_protocol gre_pptp_protocol = { 667static const struct gre_protocol gre_pptp_protocol = {
668 .handler = pptp_rcv, 668 .handler = pptp_rcv,
669}; 669};
670 670
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 43b8d7797f0a..4a624a29393f 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -85,12 +85,12 @@ static const int bitrate_list[] = {
85 */ 85 */
86static inline int wpa2_capable(void) 86static inline int wpa2_capable(void)
87{ 87{
88 return (0 <= ps3_compare_firmware_version(2, 0, 0)); 88 return 0 <= ps3_compare_firmware_version(2, 0, 0);
89} 89}
90 90
91static inline int precise_ie(void) 91static inline int precise_ie(void)
92{ 92{
93 return (0 <= ps3_compare_firmware_version(2, 2, 0)); 93 return 0 <= ps3_compare_firmware_version(2, 2, 0);
94} 94}
95/* 95/*
96 * post_eurus_cmd helpers 96 * post_eurus_cmd helpers
@@ -506,7 +506,7 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
506 start[1] = (buf - start - 2); 506 start[1] = (buf - start - 2);
507 507
508 pr_debug("%s: ->\n", __func__); 508 pr_debug("%s: ->\n", __func__);
509 return (buf - start); 509 return buf - start;
510} 510}
511 511
512struct ie_item { 512struct ie_item {
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 75c2ff99d66d..18c0297743f1 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -4,6 +4,7 @@
4 * 4 *
5 * Copyright (C) 2010 Marvell International Ltd. 5 * Copyright (C) 2010 Marvell International Ltd.
6 * Sachin Sanap <ssanap@marvell.com> 6 * Sachin Sanap <ssanap@marvell.com>
7 * Zhangfei Gao <zgao6@marvell.com>
7 * Philip Rakity <prakity@marvell.com> 8 * Philip Rakity <prakity@marvell.com>
8 * Mark Brown <markb@marvell.com> 9 * Mark Brown <markb@marvell.com>
9 * 10 *
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index cc8385a6727e..26c37d3a5868 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 9 54#define _QLCNIC_LINUX_SUBVERSION 11
55#define QLCNIC_LINUX_VERSIONID "5.0.9" 55#define QLCNIC_LINUX_VERSIONID "5.0.11"
56#define QLCNIC_DRV_IDC_VER 0x01 56#define QLCNIC_DRV_IDC_VER 0x01
57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -94,11 +94,12 @@
94#define FIRST_PAGE_GROUP_START 0 94#define FIRST_PAGE_GROUP_START 0
95#define FIRST_PAGE_GROUP_END 0x100000 95#define FIRST_PAGE_GROUP_END 0x100000
96 96
97#define P3_MAX_MTU (9600) 97#define P3P_MAX_MTU (9600)
98#define P3P_MIN_MTU (68)
98#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */ 99#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
99 100
100#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN) 101#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
101#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU) 102#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
102#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 103#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
103#define QLCNIC_LRO_BUFFER_EXTRA 2048 104#define QLCNIC_LRO_BUFFER_EXTRA 2048
104 105
@@ -306,20 +307,20 @@ struct uni_data_desc{
306/* Magic number to let user know flash is programmed */ 307/* Magic number to let user know flash is programmed */
307#define QLCNIC_BDINFO_MAGIC 0x12345678 308#define QLCNIC_BDINFO_MAGIC 0x12345678
308 309
309#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021 310#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
310#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022 311#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
311#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023 312#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
312#define QLCNIC_BRDTYPE_P3_4_GB 0x0024 313#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
313#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025 314#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
314#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026 315#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
315#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027 316#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
316#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028 317#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
317#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029 318#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
318#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a 319#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
319#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b 320#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
320#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031 321#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
321#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032 322#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
322#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080 323#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
323 324
324#define QLCNIC_MSIX_TABLE_OFFSET 0x44 325#define QLCNIC_MSIX_TABLE_OFFSET 0x44
325 326
@@ -718,7 +719,7 @@ struct qlcnic_cardrsp_tx_ctx {
718 719
719/* MAC */ 720/* MAC */
720 721
721#define MC_COUNT_P3 38 722#define MC_COUNT_P3P 38
722 723
723#define QLCNIC_MAC_NOOP 0 724#define QLCNIC_MAC_NOOP 0
724#define QLCNIC_MAC_ADD 1 725#define QLCNIC_MAC_ADD 1
@@ -898,6 +899,16 @@ struct qlcnic_mac_req {
898 u8 mac_addr[6]; 899 u8 mac_addr[6];
899}; 900};
900 901
902struct qlcnic_vlan_req {
903 __le16 vlan_id;
904 __le16 rsvd[3];
905};
906
907struct qlcnic_ipaddr {
908 __be32 ipv4;
909 __be32 ipv6[4];
910};
911
901#define QLCNIC_MSI_ENABLED 0x02 912#define QLCNIC_MSI_ENABLED 0x02
902#define QLCNIC_MSIX_ENABLED 0x04 913#define QLCNIC_MSIX_ENABLED 0x04
903#define QLCNIC_LRO_ENABLED 0x08 914#define QLCNIC_LRO_ENABLED 0x08
@@ -909,6 +920,7 @@ struct qlcnic_mac_req {
909#define QLCNIC_TAGGING_ENABLED 0x100 920#define QLCNIC_TAGGING_ENABLED 0x100
910#define QLCNIC_MACSPOOF 0x200 921#define QLCNIC_MACSPOOF 0x200
911#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 922#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
923#define QLCNIC_PROMISC_DISABLED 0x800
912#define QLCNIC_IS_MSI_FAMILY(adapter) \ 924#define QLCNIC_IS_MSI_FAMILY(adapter) \
913 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 925 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
914 926
@@ -935,7 +947,7 @@ struct qlcnic_mac_req {
935struct qlcnic_filter { 947struct qlcnic_filter {
936 struct hlist_node fnode; 948 struct hlist_node fnode;
937 u8 faddr[ETH_ALEN]; 949 u8 faddr[ETH_ALEN];
938 u16 vlan_id; 950 __le16 vlan_id;
939 unsigned long ftime; 951 unsigned long ftime;
940}; 952};
941 953
@@ -1013,6 +1025,7 @@ struct qlcnic_adapter {
1013 1025
1014 u64 dev_rst_time; 1026 u64 dev_rst_time;
1015 1027
1028 struct vlan_group *vlgrp;
1016 struct qlcnic_npar_info *npars; 1029 struct qlcnic_npar_info *npars;
1017 struct qlcnic_eswitch *eswitch; 1030 struct qlcnic_eswitch *eswitch;
1018 struct qlcnic_nic_template *nic_ops; 1031 struct qlcnic_nic_template *nic_ops;
@@ -1168,6 +1181,18 @@ struct qlcnic_esw_func_cfg {
1168#define QLCNIC_STATS_ESWITCH 2 1181#define QLCNIC_STATS_ESWITCH 2
1169#define QLCNIC_QUERY_RX_COUNTER 0 1182#define QLCNIC_QUERY_RX_COUNTER 0
1170#define QLCNIC_QUERY_TX_COUNTER 1 1183#define QLCNIC_QUERY_TX_COUNTER 1
1184#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL
1185
1186#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
1187do { \
1188 if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \
1189 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
1190 (VAL1) = (VAL2); \
1191 else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \
1192 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \
1193 (VAL1) += (VAL2); \
1194} while (0)
1195
1171struct __qlcnic_esw_statistics { 1196struct __qlcnic_esw_statistics {
1172 __le16 context_id; 1197 __le16 context_id;
1173 __le16 version; 1198 __le16 version;
@@ -1273,7 +1298,7 @@ void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
1273int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32); 1298int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
1274int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter); 1299int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
1275int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable); 1300int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
1276int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd); 1301int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
1277int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable); 1302int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
1278void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup); 1303void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
1279 1304
@@ -1289,6 +1314,8 @@ int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1289void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); 1314void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1290 1315
1291/* Functions from qlcnic_main.c */ 1316/* Functions from qlcnic_main.c */
1317int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
1318void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
1292int qlcnic_reset_context(struct qlcnic_adapter *); 1319int qlcnic_reset_context(struct qlcnic_adapter *);
1293u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, 1320u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1294 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); 1321 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
@@ -1299,19 +1326,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1299void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); 1326void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1300 1327
1301/* Management functions */ 1328/* Management functions */
1302int qlcnic_set_mac_address(struct qlcnic_adapter *, u8*);
1303int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); 1329int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
1304int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); 1330int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
1305int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *); 1331int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
1306int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*); 1332int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
1307int qlcnic_reset_partition(struct qlcnic_adapter *, u8);
1308 1333
1309/* eSwitch management functions */ 1334/* eSwitch management functions */
1310int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *, u8,
1311 struct qlcnic_eswitch *);
1312int qlcnic_get_eswitch_status(struct qlcnic_adapter *, u8,
1313 struct qlcnic_eswitch *);
1314int qlcnic_toggle_eswitch(struct qlcnic_adapter *, u8, u8);
1315int qlcnic_config_switch_port(struct qlcnic_adapter *, 1335int qlcnic_config_switch_port(struct qlcnic_adapter *,
1316 struct qlcnic_esw_func_cfg *); 1336 struct qlcnic_esw_func_cfg *);
1317int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *, 1337int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
@@ -1351,7 +1371,7 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1351 {0x1077, 0x8020, 0x1077, 0x20f, 1371 {0x1077, 0x8020, 0x1077, 0x20f,
1352 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 1372 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1353 {0x1077, 0x8020, 0x103c, 0x3733, 1373 {0x1077, 0x8020, 0x103c, 0x3733,
1354 "NC523SFP 10Gb 2-port Flex-10 Server Adapter"}, 1374 "NC523SFP 10Gb 2-port Server Adapter"},
1355 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 1375 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1356}; 1376};
1357 1377
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 95a821e0b66f..1cdc05dade6b 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -556,32 +556,6 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
556 } 556 }
557} 557}
558 558
559/* Set MAC address of a NIC partition */
560int qlcnic_set_mac_address(struct qlcnic_adapter *adapter, u8* mac)
561{
562 int err = 0;
563 u32 arg1, arg2, arg3;
564
565 arg1 = adapter->ahw.pci_func | BIT_9;
566 arg2 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
567 arg3 = mac[4] | (mac[5] << 16);
568
569 err = qlcnic_issue_cmd(adapter,
570 adapter->ahw.pci_func,
571 adapter->fw_hal_version,
572 arg1,
573 arg2,
574 arg3,
575 QLCNIC_CDRP_CMD_MAC_ADDRESS);
576
577 if (err != QLCNIC_RCODE_SUCCESS) {
578 dev_err(&adapter->pdev->dev,
579 "Failed to set mac address%d\n", err);
580 err = -EIO;
581 }
582
583 return err;
584}
585 559
586/* Get MAC address of a NIC partition */ 560/* Get MAC address of a NIC partition */
587int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac) 561int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
@@ -742,15 +716,15 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
742 716
743 if (err == QLCNIC_RCODE_SUCCESS) { 717 if (err == QLCNIC_RCODE_SUCCESS) {
744 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) { 718 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
745 pci_info->id = le32_to_cpu(npar->id); 719 pci_info->id = le16_to_cpu(npar->id);
746 pci_info->active = le32_to_cpu(npar->active); 720 pci_info->active = le16_to_cpu(npar->active);
747 pci_info->type = le32_to_cpu(npar->type); 721 pci_info->type = le16_to_cpu(npar->type);
748 pci_info->default_port = 722 pci_info->default_port =
749 le32_to_cpu(npar->default_port); 723 le16_to_cpu(npar->default_port);
750 pci_info->tx_min_bw = 724 pci_info->tx_min_bw =
751 le32_to_cpu(npar->tx_min_bw); 725 le16_to_cpu(npar->tx_min_bw);
752 pci_info->tx_max_bw = 726 pci_info->tx_max_bw =
753 le32_to_cpu(npar->tx_max_bw); 727 le16_to_cpu(npar->tx_max_bw);
754 memcpy(pci_info->mac, npar->mac, ETH_ALEN); 728 memcpy(pci_info->mac, npar->mac, ETH_ALEN);
755 } 729 }
756 } else { 730 } else {
@@ -764,149 +738,6 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
764 return err; 738 return err;
765} 739}
766 740
767/* Reset a NIC partition */
768
769int qlcnic_reset_partition(struct qlcnic_adapter *adapter, u8 func_no)
770{
771 int err = -EIO;
772
773 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
774 return err;
775
776 err = qlcnic_issue_cmd(adapter,
777 adapter->ahw.pci_func,
778 adapter->fw_hal_version,
779 func_no,
780 0,
781 0,
782 QLCNIC_CDRP_CMD_RESET_NPAR);
783
784 if (err != QLCNIC_RCODE_SUCCESS) {
785 dev_err(&adapter->pdev->dev,
786 "Failed to issue reset partition%d\n", err);
787 err = -EIO;
788 }
789
790 return err;
791}
792
793/* Get eSwitch Capabilities */
794int qlcnic_get_eswitch_capabilities(struct qlcnic_adapter *adapter, u8 port,
795 struct qlcnic_eswitch *eswitch)
796{
797 int err = -EIO;
798 u32 arg1, arg2;
799
800 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
801 return err;
802
803 err = qlcnic_issue_cmd(adapter,
804 adapter->ahw.pci_func,
805 adapter->fw_hal_version,
806 port,
807 0,
808 0,
809 QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY);
810
811 if (err == QLCNIC_RCODE_SUCCESS) {
812 arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
813 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
814
815 eswitch->port = arg1 & 0xf;
816 eswitch->max_ucast_filters = LSW(arg2);
817 eswitch->max_active_vlans = MSW(arg2) & 0xfff;
818 if (arg1 & BIT_6)
819 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
820 if (arg1 & BIT_7)
821 eswitch->flags |= QLCNIC_SWITCH_PROMISC_MODE;
822 if (arg1 & BIT_8)
823 eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
824 } else {
825 dev_err(&adapter->pdev->dev,
826 "Failed to get eswitch capabilities%d\n", err);
827 }
828
829 return err;
830}
831
832/* Get current status of eswitch */
833int qlcnic_get_eswitch_status(struct qlcnic_adapter *adapter, u8 port,
834 struct qlcnic_eswitch *eswitch)
835{
836 int err = -EIO;
837 u32 arg1, arg2;
838
839 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
840 return err;
841
842 err = qlcnic_issue_cmd(adapter,
843 adapter->ahw.pci_func,
844 adapter->fw_hal_version,
845 port,
846 0,
847 0,
848 QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS);
849
850 if (err == QLCNIC_RCODE_SUCCESS) {
851 arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
852 arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
853
854 eswitch->port = arg1 & 0xf;
855 eswitch->active_vports = LSB(arg2);
856 eswitch->active_ucast_filters = MSB(arg2);
857 eswitch->active_vlans = LSB(MSW(arg2));
858 if (arg1 & BIT_6)
859 eswitch->flags |= QLCNIC_SWITCH_VLAN_FILTERING;
860 if (arg1 & BIT_8)
861 eswitch->flags |= QLCNIC_SWITCH_PORT_MIRRORING;
862
863 } else {
864 dev_err(&adapter->pdev->dev,
865 "Failed to get eswitch status%d\n", err);
866 }
867
868 return err;
869}
870
871/* Enable/Disable eSwitch */
872int qlcnic_toggle_eswitch(struct qlcnic_adapter *adapter, u8 id, u8 enable)
873{
874 int err = -EIO;
875 u32 arg1, arg2;
876 struct qlcnic_eswitch *eswitch;
877
878 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
879 return err;
880
881 eswitch = &adapter->eswitch[id];
882 if (!eswitch)
883 return err;
884
885 arg1 = eswitch->port | (enable ? BIT_4 : 0);
886 arg2 = eswitch->active_vports | (eswitch->max_ucast_filters << 8) |
887 (eswitch->max_active_vlans << 16);
888 err = qlcnic_issue_cmd(adapter,
889 adapter->ahw.pci_func,
890 adapter->fw_hal_version,
891 arg1,
892 arg2,
893 0,
894 QLCNIC_CDRP_CMD_TOGGLE_ESWITCH);
895
896 if (err != QLCNIC_RCODE_SUCCESS) {
897 dev_err(&adapter->pdev->dev,
898 "Failed to enable eswitch%d\n", eswitch->port);
899 eswitch->flags &= ~QLCNIC_SWITCH_ENABLE;
900 err = -EIO;
901 } else {
902 eswitch->flags |= QLCNIC_SWITCH_ENABLE;
903 dev_info(&adapter->pdev->dev,
904 "Enabled eSwitch for port %d\n", eswitch->port);
905 }
906
907 return err;
908}
909
910/* Configure eSwitch for port mirroring */ 741/* Configure eSwitch for port mirroring */
911int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, 742int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
912 u8 enable_mirroring, u8 pci_func) 743 u8 enable_mirroring, u8 pci_func)
@@ -1016,7 +847,14 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1016 if (adapter->npars == NULL) 847 if (adapter->npars == NULL)
1017 return -EIO; 848 return -EIO;
1018 849
1019 memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics)); 850 memset(esw_stats, 0, sizeof(u64));
851 esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
852 esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
853 esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
854 esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
855 esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
856 esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
857 esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
1020 esw_stats->context_id = eswitch; 858 esw_stats->context_id = eswitch;
1021 859
1022 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 860 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
@@ -1029,14 +867,20 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1029 867
1030 esw_stats->size = port_stats.size; 868 esw_stats->size = port_stats.size;
1031 esw_stats->version = port_stats.version; 869 esw_stats->version = port_stats.version;
1032 esw_stats->unicast_frames += port_stats.unicast_frames; 870 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
1033 esw_stats->multicast_frames += port_stats.multicast_frames; 871 port_stats.unicast_frames);
1034 esw_stats->broadcast_frames += port_stats.broadcast_frames; 872 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
1035 esw_stats->dropped_frames += port_stats.dropped_frames; 873 port_stats.multicast_frames);
1036 esw_stats->errors += port_stats.errors; 874 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
1037 esw_stats->local_frames += port_stats.local_frames; 875 port_stats.broadcast_frames);
1038 esw_stats->numbytes += port_stats.numbytes; 876 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
1039 877 port_stats.dropped_frames);
878 QLCNIC_ADD_ESW_STATS(esw_stats->errors,
879 port_stats.errors);
880 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
881 port_stats.local_frames);
882 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
883 port_stats.numbytes);
1040 ret = 0; 884 ret = 0;
1041 } 885 }
1042 return ret; 886 return ret;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index cb9463bd6b1e..25e93a53fca0 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -78,7 +78,25 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
78 78
79}; 79};
80 80
81static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
82 "rx unicast frames",
83 "rx multicast frames",
84 "rx broadcast frames",
85 "rx dropped frames",
86 "rx errors",
87 "rx local frames",
88 "rx numbytes",
89 "tx unicast frames",
90 "tx multicast frames",
91 "tx broadcast frames",
92 "tx dropped frames",
93 "tx errors",
94 "tx local frames",
95 "tx numbytes",
96};
97
81#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) 98#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
99#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
82 100
83static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { 101static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
84 "Register_Test_on_offline", 102 "Register_Test_on_offline",
@@ -96,7 +114,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
96static const u32 diag_registers[] = { 114static const u32 diag_registers[] = {
97 CRB_CMDPEG_STATE, 115 CRB_CMDPEG_STATE,
98 CRB_RCVPEG_STATE, 116 CRB_RCVPEG_STATE,
99 CRB_XG_STATE_P3, 117 CRB_XG_STATE_P3P,
100 CRB_FW_CAPABILITIES_1, 118 CRB_FW_CAPABILITIES_1,
101 ISR_INT_STATE_REG, 119 ISR_INT_STATE_REG,
102 QLCNIC_CRB_DRV_ACTIVE, 120 QLCNIC_CRB_DRV_ACTIVE,
@@ -189,9 +207,9 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
189 goto skip; 207 goto skip;
190 } 208 }
191 209
192 val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn)); 210 val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
193 ecmd->speed = P3_LINK_SPEED_MHZ * 211 ecmd->speed = P3P_LINK_SPEED_MHZ *
194 P3_LINK_SPEED_VAL(pcifn, val); 212 P3P_LINK_SPEED_VAL(pcifn, val);
195 ecmd->duplex = DUPLEX_FULL; 213 ecmd->duplex = DUPLEX_FULL;
196 ecmd->autoneg = AUTONEG_DISABLE; 214 ecmd->autoneg = AUTONEG_DISABLE;
197 } else 215 } else
@@ -202,42 +220,42 @@ skip:
202 ecmd->transceiver = XCVR_EXTERNAL; 220 ecmd->transceiver = XCVR_EXTERNAL;
203 221
204 switch (adapter->ahw.board_type) { 222 switch (adapter->ahw.board_type) {
205 case QLCNIC_BRDTYPE_P3_REF_QG: 223 case QLCNIC_BRDTYPE_P3P_REF_QG:
206 case QLCNIC_BRDTYPE_P3_4_GB: 224 case QLCNIC_BRDTYPE_P3P_4_GB:
207 case QLCNIC_BRDTYPE_P3_4_GB_MM: 225 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
208 226
209 ecmd->supported |= SUPPORTED_Autoneg; 227 ecmd->supported |= SUPPORTED_Autoneg;
210 ecmd->advertising |= ADVERTISED_Autoneg; 228 ecmd->advertising |= ADVERTISED_Autoneg;
211 case QLCNIC_BRDTYPE_P3_10G_CX4: 229 case QLCNIC_BRDTYPE_P3P_10G_CX4:
212 case QLCNIC_BRDTYPE_P3_10G_CX4_LP: 230 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
213 case QLCNIC_BRDTYPE_P3_10000_BASE_T: 231 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
214 ecmd->supported |= SUPPORTED_TP; 232 ecmd->supported |= SUPPORTED_TP;
215 ecmd->advertising |= ADVERTISED_TP; 233 ecmd->advertising |= ADVERTISED_TP;
216 ecmd->port = PORT_TP; 234 ecmd->port = PORT_TP;
217 ecmd->autoneg = adapter->link_autoneg; 235 ecmd->autoneg = adapter->link_autoneg;
218 break; 236 break;
219 case QLCNIC_BRDTYPE_P3_IMEZ: 237 case QLCNIC_BRDTYPE_P3P_IMEZ:
220 case QLCNIC_BRDTYPE_P3_XG_LOM: 238 case QLCNIC_BRDTYPE_P3P_XG_LOM:
221 case QLCNIC_BRDTYPE_P3_HMEZ: 239 case QLCNIC_BRDTYPE_P3P_HMEZ:
222 ecmd->supported |= SUPPORTED_MII; 240 ecmd->supported |= SUPPORTED_MII;
223 ecmd->advertising |= ADVERTISED_MII; 241 ecmd->advertising |= ADVERTISED_MII;
224 ecmd->port = PORT_MII; 242 ecmd->port = PORT_MII;
225 ecmd->autoneg = AUTONEG_DISABLE; 243 ecmd->autoneg = AUTONEG_DISABLE;
226 break; 244 break;
227 case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS: 245 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
228 case QLCNIC_BRDTYPE_P3_10G_SFP_CT: 246 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
229 case QLCNIC_BRDTYPE_P3_10G_SFP_QT: 247 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
230 ecmd->advertising |= ADVERTISED_TP; 248 ecmd->advertising |= ADVERTISED_TP;
231 ecmd->supported |= SUPPORTED_TP; 249 ecmd->supported |= SUPPORTED_TP;
232 check_sfp_module = netif_running(dev) && 250 check_sfp_module = netif_running(dev) &&
233 adapter->has_link_events; 251 adapter->has_link_events;
234 case QLCNIC_BRDTYPE_P3_10G_XFP: 252 case QLCNIC_BRDTYPE_P3P_10G_XFP:
235 ecmd->supported |= SUPPORTED_FIBRE; 253 ecmd->supported |= SUPPORTED_FIBRE;
236 ecmd->advertising |= ADVERTISED_FIBRE; 254 ecmd->advertising |= ADVERTISED_FIBRE;
237 ecmd->port = PORT_FIBRE; 255 ecmd->port = PORT_FIBRE;
238 ecmd->autoneg = AUTONEG_DISABLE; 256 ecmd->autoneg = AUTONEG_DISABLE;
239 break; 257 break;
240 case QLCNIC_BRDTYPE_P3_10G_TP: 258 case QLCNIC_BRDTYPE_P3P_10G_TP:
241 if (adapter->ahw.port_type == QLCNIC_XGBE) { 259 if (adapter->ahw.port_type == QLCNIC_XGBE) {
242 ecmd->autoneg = AUTONEG_DISABLE; 260 ecmd->autoneg = AUTONEG_DISABLE;
243 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); 261 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
@@ -343,7 +361,7 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
343 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 361 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
344 struct qlcnic_host_sds_ring *sds_ring; 362 struct qlcnic_host_sds_ring *sds_ring;
345 u32 *regs_buff = p; 363 u32 *regs_buff = p;
346 int ring, i = 0; 364 int ring, i = 0, j = 0;
347 365
348 memset(p, 0, qlcnic_get_regs_len(dev)); 366 memset(p, 0, qlcnic_get_regs_len(dev));
349 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) | 367 regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
@@ -352,8 +370,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
352 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff)); 370 regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
353 regs_buff[1] = QLCNIC_MGMT_API_VERSION; 371 regs_buff[1] = QLCNIC_MGMT_API_VERSION;
354 372
355 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[i] != -1; i++) 373 for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
356 regs_buff[i] = QLCRD32(adapter, diag_registers[i]); 374 regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
357 375
358 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 376 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
359 return; 377 return;
@@ -381,9 +399,9 @@ static u32 qlcnic_test_link(struct net_device *dev)
381 struct qlcnic_adapter *adapter = netdev_priv(dev); 399 struct qlcnic_adapter *adapter = netdev_priv(dev);
382 u32 val; 400 u32 val;
383 401
384 val = QLCRD32(adapter, CRB_XG_STATE_P3); 402 val = QLCRD32(adapter, CRB_XG_STATE_P3P);
385 val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); 403 val = XG_LINK_STATE_P3P(adapter->ahw.pci_func, val);
386 return (val == XG_LINK_UP_P3) ? 0 : 1; 404 return (val == XG_LINK_UP_P3P) ? 0 : 1;
387} 405}
388 406
389static int 407static int
@@ -625,10 +643,13 @@ static int qlcnic_reg_test(struct net_device *dev)
625 643
626static int qlcnic_get_sset_count(struct net_device *dev, int sset) 644static int qlcnic_get_sset_count(struct net_device *dev, int sset)
627{ 645{
646 struct qlcnic_adapter *adapter = netdev_priv(dev);
628 switch (sset) { 647 switch (sset) {
629 case ETH_SS_TEST: 648 case ETH_SS_TEST:
630 return QLCNIC_TEST_LEN; 649 return QLCNIC_TEST_LEN;
631 case ETH_SS_STATS: 650 case ETH_SS_STATS:
651 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
652 return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
632 return QLCNIC_STATS_LEN; 653 return QLCNIC_STATS_LEN;
633 default: 654 default:
634 return -EOPNOTSUPP; 655 return -EOPNOTSUPP;
@@ -636,6 +657,8 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
636} 657}
637 658
638#define QLC_ILB_PKT_SIZE 64 659#define QLC_ILB_PKT_SIZE 64
660#define QLC_NUM_ILB_PKT 16
661#define QLC_ILB_MAX_RCV_LOOP 10
639 662
640static void qlcnic_create_loopback_buff(unsigned char *data) 663static void qlcnic_create_loopback_buff(unsigned char *data)
641{ 664{
@@ -657,24 +680,34 @@ static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
657 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 680 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
658 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0]; 681 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
659 struct sk_buff *skb; 682 struct sk_buff *skb;
660 int i; 683 int i, loop, cnt = 0;
661 684
662 for (i = 0; i < 16; i++) { 685 for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
663 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE); 686 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
664 qlcnic_create_loopback_buff(skb->data); 687 qlcnic_create_loopback_buff(skb->data);
665 skb_put(skb, QLC_ILB_PKT_SIZE); 688 skb_put(skb, QLC_ILB_PKT_SIZE);
666 689
667 adapter->diag_cnt = 0; 690 adapter->diag_cnt = 0;
668
669 qlcnic_xmit_frame(skb, adapter->netdev); 691 qlcnic_xmit_frame(skb, adapter->netdev);
670 692
671 msleep(5); 693 loop = 0;
672 694 do {
673 qlcnic_process_rcv_ring_diag(sds_ring); 695 msleep(1);
696 qlcnic_process_rcv_ring_diag(sds_ring);
697 } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
698 !adapter->diag_cnt);
674 699
675 dev_kfree_skb_any(skb); 700 dev_kfree_skb_any(skb);
701
676 if (!adapter->diag_cnt) 702 if (!adapter->diag_cnt)
677 return -1; 703 dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
704 " not recevied\n", i + 1);
705 else
706 cnt++;
707 }
708 if (cnt != i) {
709 dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
710 return -1;
678 } 711 }
679 return 0; 712 return 0;
680} 713}
@@ -694,6 +727,11 @@ static int qlcnic_loopback_test(struct net_device *netdev)
694 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 727 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
695 return -EIO; 728 return -EIO;
696 729
730 if (qlcnic_request_quiscent_mode(adapter)) {
731 clear_bit(__QLCNIC_RESETTING, &adapter->state);
732 return -EIO;
733 }
734
697 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); 735 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
698 if (ret) 736 if (ret)
699 goto clear_it; 737 goto clear_it;
@@ -710,6 +748,7 @@ done:
710 qlcnic_diag_free_res(netdev, max_sds_rings); 748 qlcnic_diag_free_res(netdev, max_sds_rings);
711 749
712clear_it: 750clear_it:
751 qlcnic_clear_quiscent_mode(adapter);
713 adapter->max_sds_rings = max_sds_rings; 752 adapter->max_sds_rings = max_sds_rings;
714 clear_bit(__QLCNIC_RESETTING, &adapter->state); 753 clear_bit(__QLCNIC_RESETTING, &adapter->state);
715 return ret; 754 return ret;
@@ -777,7 +816,8 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
777static void 816static void
778qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) 817qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
779{ 818{
780 int index; 819 struct qlcnic_adapter *adapter = netdev_priv(dev);
820 int index, i;
781 821
782 switch (stringset) { 822 switch (stringset) {
783 case ETH_SS_TEST: 823 case ETH_SS_TEST:
@@ -790,16 +830,43 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
790 qlcnic_gstrings_stats[index].stat_string, 830 qlcnic_gstrings_stats[index].stat_string,
791 ETH_GSTRING_LEN); 831 ETH_GSTRING_LEN);
792 } 832 }
793 break; 833 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
834 return;
835 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
836 memcpy(data + index * ETH_GSTRING_LEN,
837 qlcnic_device_gstrings_stats[i],
838 ETH_GSTRING_LEN);
839 }
794 } 840 }
795} 841}
796 842
843#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
844 (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
845
846static void
847qlcnic_fill_device_stats(int *index, u64 *data,
848 struct __qlcnic_esw_statistics *stats)
849{
850 int ind = *index;
851
852 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames);
853 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames);
854 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames);
855 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames);
856 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors);
857 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames);
858 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes);
859
860 *index = ind;
861}
862
797static void 863static void
798qlcnic_get_ethtool_stats(struct net_device *dev, 864qlcnic_get_ethtool_stats(struct net_device *dev,
799 struct ethtool_stats *stats, u64 * data) 865 struct ethtool_stats *stats, u64 * data)
800{ 866{
801 struct qlcnic_adapter *adapter = netdev_priv(dev); 867 struct qlcnic_adapter *adapter = netdev_priv(dev);
802 int index; 868 struct qlcnic_esw_statistics port_stats;
869 int index, ret;
803 870
804 for (index = 0; index < QLCNIC_STATS_LEN; index++) { 871 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
805 char *p = 872 char *p =
@@ -809,6 +876,24 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
809 (qlcnic_gstrings_stats[index].sizeof_stat == 876 (qlcnic_gstrings_stats[index].sizeof_stat ==
810 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); 877 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
811 } 878 }
879
880 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
881 return;
882
883 memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
884 ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func,
885 QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
886 if (ret)
887 return;
888
889 qlcnic_fill_device_stats(&index, data, &port_stats.rx);
890
891 ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func,
892 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
893 if (ret)
894 return;
895
896 qlcnic_fill_device_stats(&index, data, &port_stats.tx);
812} 897}
813 898
814static int qlcnic_set_tx_csum(struct net_device *dev, u32 data) 899static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
@@ -847,7 +932,7 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
847 return 0; 932 return 0;
848 } 933 }
849 934
850 if (adapter->flags & QLCNIC_LRO_ENABLED) { 935 if (dev->features & NETIF_F_LRO) {
851 if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED)) 936 if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
852 return -EIO; 937 return -EIO;
853 938
@@ -1044,7 +1129,7 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
1044 return -EINVAL; 1129 return -EINVAL;
1045 } 1130 }
1046 1131
1047 if ((data & ETH_FLAG_LRO) && (adapter->flags & QLCNIC_LRO_ENABLED)) 1132 if ((data & ETH_FLAG_LRO) && (netdev->features & NETIF_F_LRO))
1048 return 0; 1133 return 0;
1049 1134
1050 if (data & ETH_FLAG_LRO) { 1135 if (data & ETH_FLAG_LRO) {
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 716203e41dc7..4290b80cde1a 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -556,18 +556,18 @@ enum {
556#define XG_LINK_UP 0x10 556#define XG_LINK_UP 0x10
557#define XG_LINK_DOWN 0x20 557#define XG_LINK_DOWN 0x20
558 558
559#define XG_LINK_UP_P3 0x01 559#define XG_LINK_UP_P3P 0x01
560#define XG_LINK_DOWN_P3 0x02 560#define XG_LINK_DOWN_P3P 0x02
561#define XG_LINK_STATE_P3_MASK 0xf 561#define XG_LINK_STATE_P3P_MASK 0xf
562#define XG_LINK_STATE_P3(pcifn, val) \ 562#define XG_LINK_STATE_P3P(pcifn, val) \
563 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) 563 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
564 564
565#define P3_LINK_SPEED_MHZ 100 565#define P3P_LINK_SPEED_MHZ 100
566#define P3_LINK_SPEED_MASK 0xff 566#define P3P_LINK_SPEED_MASK 0xff
567#define P3_LINK_SPEED_REG(pcifn) \ 567#define P3P_LINK_SPEED_REG(pcifn) \
568 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) 568 (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
569#define P3_LINK_SPEED_VAL(pcifn, reg) \ 569#define P3P_LINK_SPEED_VAL(pcifn, reg) \
570 (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) 570 (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
571 571
572#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000) 572#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
573#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg)) 573#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
@@ -592,7 +592,7 @@ enum {
592#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50)) 592#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
593#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c)) 593#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
594 594
595#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98)) 595#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
596#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) 596#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
597#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) 597#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
598 598
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index c198df90ff3c..7a47a2a7ee27 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -375,10 +375,11 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
375 375
376static int 376static int
377qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 377qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
378 u16 vlan_id, unsigned op) 378 __le16 vlan_id, unsigned op)
379{ 379{
380 struct qlcnic_nic_req req; 380 struct qlcnic_nic_req req;
381 struct qlcnic_mac_req *mac_req; 381 struct qlcnic_mac_req *mac_req;
382 struct qlcnic_vlan_req *vlan_req;
382 u64 word; 383 u64 word;
383 384
384 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 385 memset(&req, 0, sizeof(struct qlcnic_nic_req));
@@ -391,7 +392,8 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
391 mac_req->op = op; 392 mac_req->op = op;
392 memcpy(mac_req->mac_addr, addr, 6); 393 memcpy(mac_req->mac_addr, addr, 6);
393 394
394 req.words[1] = cpu_to_le64(vlan_id); 395 vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
396 vlan_req->vlan_id = vlan_id;
395 397
396 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 398 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
397} 399}
@@ -440,7 +442,8 @@ void qlcnic_set_multi(struct net_device *netdev)
440 qlcnic_nic_add_mac(adapter, bcast_addr); 442 qlcnic_nic_add_mac(adapter, bcast_addr);
441 443
442 if (netdev->flags & IFF_PROMISC) { 444 if (netdev->flags & IFF_PROMISC) {
443 mode = VPORT_MISS_MODE_ACCEPT_ALL; 445 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
446 mode = VPORT_MISS_MODE_ACCEPT_ALL;
444 goto send_fw_cmd; 447 goto send_fw_cmd;
445 } 448 }
446 449
@@ -580,9 +583,6 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
580 u64 word; 583 u64 word;
581 int rv; 584 int rv;
582 585
583 if ((adapter->flags & QLCNIC_LRO_ENABLED) == enable)
584 return 0;
585
586 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 586 memset(&req, 0, sizeof(struct qlcnic_nic_req));
587 587
588 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 588 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -597,8 +597,6 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
597 dev_err(&adapter->netdev->dev, 597 dev_err(&adapter->netdev->dev,
598 "Could not send configure hw lro request\n"); 598 "Could not send configure hw lro request\n");
599 599
600 adapter->flags ^= QLCNIC_LRO_ENABLED;
601
602 return rv; 600 return rv;
603} 601}
604 602
@@ -676,9 +674,10 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
676 return rv; 674 return rv;
677} 675}
678 676
679int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd) 677int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
680{ 678{
681 struct qlcnic_nic_req req; 679 struct qlcnic_nic_req req;
680 struct qlcnic_ipaddr *ipa;
682 u64 word; 681 u64 word;
683 int rv; 682 int rv;
684 683
@@ -689,7 +688,8 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
689 req.req_hdr = cpu_to_le64(word); 688 req.req_hdr = cpu_to_le64(word);
690 689
691 req.words[0] = cpu_to_le64(cmd); 690 req.words[0] = cpu_to_le64(cmd);
692 req.words[1] = cpu_to_le64(ip); 691 ipa = (struct qlcnic_ipaddr *)&req.words[1];
692 ipa->ipv4 = ip;
693 693
694 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 694 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
695 if (rv != 0) 695 if (rv != 0)
@@ -754,9 +754,9 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
754 struct qlcnic_adapter *adapter = netdev_priv(netdev); 754 struct qlcnic_adapter *adapter = netdev_priv(netdev);
755 int rc = 0; 755 int rc = 0;
756 756
757 if (mtu > P3_MAX_MTU) { 757 if (mtu < P3P_MIN_MTU || mtu > P3P_MAX_MTU) {
758 dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n", 758 dev_err(&adapter->netdev->dev, "%d bytes < mtu < %d bytes"
759 P3_MAX_MTU); 759 " not supported\n", P3P_MAX_MTU, P3P_MIN_MTU);
760 return -EINVAL; 760 return -EINVAL;
761 } 761 }
762 762
@@ -1161,31 +1161,31 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1161 1161
1162 adapter->ahw.board_type = board_type; 1162 adapter->ahw.board_type = board_type;
1163 1163
1164 if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) { 1164 if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
1165 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I); 1165 u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
1166 if ((gpio & 0x8000) == 0) 1166 if ((gpio & 0x8000) == 0)
1167 board_type = QLCNIC_BRDTYPE_P3_10G_TP; 1167 board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
1168 } 1168 }
1169 1169
1170 switch (board_type) { 1170 switch (board_type) {
1171 case QLCNIC_BRDTYPE_P3_HMEZ: 1171 case QLCNIC_BRDTYPE_P3P_HMEZ:
1172 case QLCNIC_BRDTYPE_P3_XG_LOM: 1172 case QLCNIC_BRDTYPE_P3P_XG_LOM:
1173 case QLCNIC_BRDTYPE_P3_10G_CX4: 1173 case QLCNIC_BRDTYPE_P3P_10G_CX4:
1174 case QLCNIC_BRDTYPE_P3_10G_CX4_LP: 1174 case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
1175 case QLCNIC_BRDTYPE_P3_IMEZ: 1175 case QLCNIC_BRDTYPE_P3P_IMEZ:
1176 case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS: 1176 case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
1177 case QLCNIC_BRDTYPE_P3_10G_SFP_CT: 1177 case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
1178 case QLCNIC_BRDTYPE_P3_10G_SFP_QT: 1178 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
1179 case QLCNIC_BRDTYPE_P3_10G_XFP: 1179 case QLCNIC_BRDTYPE_P3P_10G_XFP:
1180 case QLCNIC_BRDTYPE_P3_10000_BASE_T: 1180 case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
1181 adapter->ahw.port_type = QLCNIC_XGBE; 1181 adapter->ahw.port_type = QLCNIC_XGBE;
1182 break; 1182 break;
1183 case QLCNIC_BRDTYPE_P3_REF_QG: 1183 case QLCNIC_BRDTYPE_P3P_REF_QG:
1184 case QLCNIC_BRDTYPE_P3_4_GB: 1184 case QLCNIC_BRDTYPE_P3P_4_GB:
1185 case QLCNIC_BRDTYPE_P3_4_GB_MM: 1185 case QLCNIC_BRDTYPE_P3P_4_GB_MM:
1186 adapter->ahw.port_type = QLCNIC_GBE; 1186 adapter->ahw.port_type = QLCNIC_GBE;
1187 break; 1187 break;
1188 case QLCNIC_BRDTYPE_P3_10G_TP: 1188 case QLCNIC_BRDTYPE_P3P_10G_TP:
1189 adapter->ahw.port_type = (adapter->portnum < 2) ? 1189 adapter->ahw.port_type = (adapter->portnum < 2) ?
1190 QLCNIC_XGBE : QLCNIC_GBE; 1190 QLCNIC_XGBE : QLCNIC_GBE;
1191 break; 1191 break;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 26a7d6bca5c7..0d180c6e41fe 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -259,14 +259,14 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
259 switch (ring) { 259 switch (ring) {
260 case RCV_RING_NORMAL: 260 case RCV_RING_NORMAL:
261 rds_ring->num_desc = adapter->num_rxd; 261 rds_ring->num_desc = adapter->num_rxd;
262 rds_ring->dma_size = QLCNIC_P3_RX_BUF_MAX_LEN; 262 rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
263 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN; 263 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
264 break; 264 break;
265 265
266 case RCV_RING_JUMBO: 266 case RCV_RING_JUMBO:
267 rds_ring->num_desc = adapter->num_jumbo_rxd; 267 rds_ring->num_desc = adapter->num_jumbo_rxd;
268 rds_ring->dma_size = 268 rds_ring->dma_size =
269 QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN; 269 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
270 270
271 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 271 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
272 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; 272 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
@@ -1015,8 +1015,6 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
1015int 1015int
1016qlcnic_need_fw_reset(struct qlcnic_adapter *adapter) 1016qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1017{ 1017{
1018 u32 val, version, major, minor, build;
1019
1020 if (qlcnic_check_fw_hearbeat(adapter)) { 1018 if (qlcnic_check_fw_hearbeat(adapter)) {
1021 qlcnic_rom_lock_recovery(adapter); 1019 qlcnic_rom_lock_recovery(adapter);
1022 return 1; 1020 return 1;
@@ -1025,20 +1023,8 @@ qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1025 if (adapter->need_fw_reset) 1023 if (adapter->need_fw_reset)
1026 return 1; 1024 return 1;
1027 1025
1028 /* check if we have got newer or different file firmware */ 1026 if (adapter->fw)
1029 if (adapter->fw) { 1027 return 1;
1030
1031 val = qlcnic_get_fw_version(adapter);
1032
1033 version = QLCNIC_DECODE_VERSION(val);
1034
1035 major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
1036 minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
1037 build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
1038
1039 if (version > QLCNIC_VERSION_CODE(major, minor, build))
1040 return 1;
1041 }
1042 1028
1043 return 0; 1029 return 0;
1044} 1030}
@@ -1174,18 +1160,6 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1174 return -EINVAL; 1160 return -EINVAL;
1175 } 1161 }
1176 1162
1177 /* check if flashed firmware is newer */
1178 if (qlcnic_rom_fast_read(adapter,
1179 QLCNIC_FW_VERSION_OFFSET, (int *)&val))
1180 return -EIO;
1181
1182 val = QLCNIC_DECODE_VERSION(val);
1183 if (val > ver) {
1184 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
1185 fw_name[fw_type]);
1186 return -EINVAL;
1187 }
1188
1189 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); 1163 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
1190 return 0; 1164 return 0;
1191} 1165}
@@ -1329,7 +1303,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1329 return -ENOMEM; 1303 return -ENOMEM;
1330 } 1304 }
1331 1305
1332 skb_reserve(skb, 2); 1306 skb_reserve(skb, NET_IP_ALIGN);
1333 1307
1334 dma = pci_map_single(pdev, skb->data, 1308 dma = pci_map_single(pdev, skb->data,
1335 rds_ring->dma_size, PCI_DMA_FROMDEVICE); 1309 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1380,24 +1354,28 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1380} 1354}
1381 1355
1382static int 1356static int
1383qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb) 1357qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1358 u16 *vlan_tag)
1384{ 1359{
1385 u16 vlan_tag;
1386 struct ethhdr *eth_hdr; 1360 struct ethhdr *eth_hdr;
1387 1361
1388 if (!__vlan_get_tag(skb, &vlan_tag)) { 1362 if (!__vlan_get_tag(skb, vlan_tag)) {
1389 if (vlan_tag == adapter->pvid) { 1363 eth_hdr = (struct ethhdr *) skb->data;
1390 /* strip the tag from the packet and send it up */ 1364 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1391 eth_hdr = (struct ethhdr *) skb->data; 1365 skb_pull(skb, VLAN_HLEN);
1392 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2); 1366 }
1393 skb_pull(skb, VLAN_HLEN); 1367 if (!adapter->pvid)
1394 return 0; 1368 return 0;
1395 } 1369
1370 if (*vlan_tag == adapter->pvid) {
1371 /* Outer vlan tag. Packet should follow non-vlan path */
1372 *vlan_tag = 0xffff;
1373 return 0;
1396 } 1374 }
1397 if (adapter->flags & QLCNIC_TAGGING_ENABLED) 1375 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1398 return 0; 1376 return 0;
1399 1377
1400 return -EIO; 1378 return -EINVAL;
1401} 1379}
1402 1380
1403static struct qlcnic_rx_buffer * 1381static struct qlcnic_rx_buffer *
@@ -1411,6 +1389,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1411 struct sk_buff *skb; 1389 struct sk_buff *skb;
1412 struct qlcnic_host_rds_ring *rds_ring; 1390 struct qlcnic_host_rds_ring *rds_ring;
1413 int index, length, cksum, pkt_offset; 1391 int index, length, cksum, pkt_offset;
1392 u16 vid = 0xffff;
1414 1393
1415 if (unlikely(ring >= adapter->max_rds_rings)) 1394 if (unlikely(ring >= adapter->max_rds_rings))
1416 return NULL; 1395 return NULL;
@@ -1439,19 +1418,18 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1439 if (pkt_offset) 1418 if (pkt_offset)
1440 skb_pull(skb, pkt_offset); 1419 skb_pull(skb, pkt_offset);
1441 1420
1442 skb->truesize = skb->len + sizeof(struct sk_buff); 1421 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1443 1422 adapter->stats.rxdropped++;
1444 if (unlikely(adapter->pvid)) { 1423 dev_kfree_skb(skb);
1445 if (qlcnic_check_rx_tagging(adapter, skb)) { 1424 return buffer;
1446 adapter->stats.rxdropped++;
1447 dev_kfree_skb_any(skb);
1448 return buffer;
1449 }
1450 } 1425 }
1451 1426
1452 skb->protocol = eth_type_trans(skb, netdev); 1427 skb->protocol = eth_type_trans(skb, netdev);
1453 1428
1454 napi_gro_receive(&sds_ring->napi, skb); 1429 if ((vid != 0xffff) && adapter->vlgrp)
1430 vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
1431 else
1432 napi_gro_receive(&sds_ring->napi, skb);
1455 1433
1456 adapter->stats.rx_pkts++; 1434 adapter->stats.rx_pkts++;
1457 adapter->stats.rxbytes += length; 1435 adapter->stats.rxbytes += length;
@@ -1480,6 +1458,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1480 int index; 1458 int index;
1481 u16 lro_length, length, data_offset; 1459 u16 lro_length, length, data_offset;
1482 u32 seq_number; 1460 u32 seq_number;
1461 u16 vid = 0xffff;
1483 1462
1484 if (unlikely(ring > adapter->max_rds_rings)) 1463 if (unlikely(ring > adapter->max_rds_rings))
1485 return NULL; 1464 return NULL;
@@ -1510,17 +1489,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1510 1489
1511 skb_put(skb, lro_length + data_offset); 1490 skb_put(skb, lro_length + data_offset);
1512 1491
1513 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1514
1515 skb_pull(skb, l2_hdr_offset); 1492 skb_pull(skb, l2_hdr_offset);
1516 1493
1517 if (unlikely(adapter->pvid)) { 1494 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1518 if (qlcnic_check_rx_tagging(adapter, skb)) { 1495 adapter->stats.rxdropped++;
1519 adapter->stats.rxdropped++; 1496 dev_kfree_skb(skb);
1520 dev_kfree_skb_any(skb); 1497 return buffer;
1521 return buffer;
1522 }
1523 } 1498 }
1499
1524 skb->protocol = eth_type_trans(skb, netdev); 1500 skb->protocol = eth_type_trans(skb, netdev);
1525 1501
1526 iph = (struct iphdr *)skb->data; 1502 iph = (struct iphdr *)skb->data;
@@ -1535,7 +1511,10 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1535 1511
1536 length = skb->len; 1512 length = skb->len;
1537 1513
1538 netif_receive_skb(skb); 1514 if ((vid != 0xffff) && adapter->vlgrp)
1515 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
1516 else
1517 netif_receive_skb(skb);
1539 1518
1540 adapter->stats.lro_pkts++; 1519 adapter->stats.lro_pkts++;
1541 adapter->stats.lrobytes += length; 1520 adapter->stats.lrobytes += length;
@@ -1714,6 +1693,18 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1714 spin_unlock(&rds_ring->lock); 1693 spin_unlock(&rds_ring->lock);
1715} 1694}
1716 1695
1696static void dump_skb(struct sk_buff *skb)
1697{
1698 int i;
1699 unsigned char *data = skb->data;
1700
1701 for (i = 0; i < skb->len; i++) {
1702 printk("%02x ", data[i]);
1703 if ((i & 0x0f) == 8)
1704 printk("\n");
1705 }
1706}
1707
1717static struct qlcnic_rx_buffer * 1708static struct qlcnic_rx_buffer *
1718qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, 1709qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1719 struct qlcnic_host_sds_ring *sds_ring, 1710 struct qlcnic_host_sds_ring *sds_ring,
@@ -1744,15 +1735,18 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1744 if (!skb) 1735 if (!skb)
1745 return buffer; 1736 return buffer;
1746 1737
1747 skb_put(skb, rds_ring->skb_size); 1738 if (length > rds_ring->skb_size)
1739 skb_put(skb, rds_ring->skb_size);
1740 else
1741 skb_put(skb, length);
1748 1742
1749 if (pkt_offset) 1743 if (pkt_offset)
1750 skb_pull(skb, pkt_offset); 1744 skb_pull(skb, pkt_offset);
1751 1745
1752 skb->truesize = skb->len + sizeof(struct sk_buff);
1753
1754 if (!qlcnic_check_loopback_buff(skb->data)) 1746 if (!qlcnic_check_loopback_buff(skb->data))
1755 adapter->diag_cnt++; 1747 adapter->diag_cnt++;
1748 else
1749 dump_skb(skb);
1756 1750
1757 dev_kfree_skb_any(skb); 1751 dev_kfree_skb_any(skb);
1758 adapter->stats.rx_pkts++; 1752 adapter->stats.rx_pkts++;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 5fd2abd1eb67..f047c7c48314 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -28,6 +28,7 @@
28 28
29#include "qlcnic.h" 29#include "qlcnic.h"
30 30
31#include <linux/swab.h>
31#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h> 33#include <linux/if_vlan.h>
33#include <net/ip.h> 34#include <net/ip.h>
@@ -45,11 +46,7 @@ char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE " 46static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID; 47 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47 48
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG; 49static struct workqueue_struct *qlcnic_wq;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int qlcnic_mac_learn; 50static int qlcnic_mac_learn;
54module_param(qlcnic_mac_learn, int, 0644); 51module_param(qlcnic_mac_learn, int, 0644);
55MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 52MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
@@ -107,7 +104,7 @@ static irqreturn_t qlcnic_msi_intr(int irq, void *data);
107static irqreturn_t qlcnic_msix_intr(int irq, void *data); 104static irqreturn_t qlcnic_msix_intr(int irq, void *data);
108 105
109static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev); 106static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
110static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long); 107static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
111static int qlcnic_start_firmware(struct qlcnic_adapter *); 108static int qlcnic_start_firmware(struct qlcnic_adapter *);
112 109
113static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter); 110static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
@@ -172,7 +169,7 @@ qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
172 169
173 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL); 170 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
174 171
175 return (recv_ctx->sds_rings == NULL); 172 return recv_ctx->sds_rings == NULL;
176} 173}
177 174
178static void 175static void
@@ -263,40 +260,6 @@ static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
263 memset(&adapter->stats, 0, sizeof(adapter->stats)); 260 memset(&adapter->stats, 0, sizeof(adapter->stats));
264} 261}
265 262
266static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
267{
268 u32 val, data;
269
270 val = adapter->ahw.board_type;
271 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
272 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
273 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
274 data = QLCNIC_PORT_MODE_802_3_AP;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
277 data = QLCNIC_PORT_MODE_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
280 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
283 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
284 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
285 } else {
286 data = QLCNIC_PORT_MODE_AUTO_NEG;
287 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
288 }
289
290 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
291 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
292 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
293 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
294 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
295 }
296 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
297 }
298}
299
300static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable) 263static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
301{ 264{
302 u32 control; 265 u32 control;
@@ -371,6 +334,13 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
371 return 0; 334 return 0;
372} 335}
373 336
337static void qlcnic_vlan_rx_register(struct net_device *netdev,
338 struct vlan_group *grp)
339{
340 struct qlcnic_adapter *adapter = netdev_priv(netdev);
341 adapter->vlgrp = grp;
342}
343
374static const struct net_device_ops qlcnic_netdev_ops = { 344static const struct net_device_ops qlcnic_netdev_ops = {
375 .ndo_open = qlcnic_open, 345 .ndo_open = qlcnic_open,
376 .ndo_stop = qlcnic_close, 346 .ndo_stop = qlcnic_close,
@@ -381,6 +351,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
381 .ndo_set_mac_address = qlcnic_set_mac, 351 .ndo_set_mac_address = qlcnic_set_mac,
382 .ndo_change_mtu = qlcnic_change_mtu, 352 .ndo_change_mtu = qlcnic_change_mtu,
383 .ndo_tx_timeout = qlcnic_tx_timeout, 353 .ndo_tx_timeout = qlcnic_tx_timeout,
354 .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
384#ifdef CONFIG_NET_POLL_CONTROLLER 355#ifdef CONFIG_NET_POLL_CONTROLLER
385 .ndo_poll_controller = qlcnic_poll_controller, 356 .ndo_poll_controller = qlcnic_poll_controller,
386#endif 357#endif
@@ -512,9 +483,9 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
512 pfn = pci_info[i].id; 483 pfn = pci_info[i].id;
513 if (pfn > QLCNIC_MAX_PCI_FUNC) 484 if (pfn > QLCNIC_MAX_PCI_FUNC)
514 return QL_STATUS_INVALID_PARAM; 485 return QL_STATUS_INVALID_PARAM;
515 adapter->npars[pfn].active = pci_info[i].active; 486 adapter->npars[pfn].active = (u8)pci_info[i].active;
516 adapter->npars[pfn].type = pci_info[i].type; 487 adapter->npars[pfn].type = (u8)pci_info[i].type;
517 adapter->npars[pfn].phy_port = pci_info[i].default_port; 488 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
518 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; 489 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
519 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; 490 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
520 } 491 }
@@ -686,8 +657,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
686 dev_info(&pdev->dev, "firmware v%d.%d.%d\n", 657 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
687 fw_major, fw_minor, fw_build); 658 fw_major, fw_minor, fw_build);
688 659
689 adapter->flags &= ~QLCNIC_LRO_ENABLED;
690
691 if (adapter->ahw.port_type == QLCNIC_XGBE) { 660 if (adapter->ahw.port_type == QLCNIC_XGBE) {
692 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; 661 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
693 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 662 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
@@ -714,7 +683,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
714 if (err) 683 if (err)
715 return err; 684 return err;
716 685
717 adapter->physical_port = nic_info.phys_port; 686 adapter->physical_port = (u8)nic_info.phys_port;
718 adapter->switch_mode = nic_info.switch_mode; 687 adapter->switch_mode = nic_info.switch_mode;
719 adapter->max_tx_ques = nic_info.max_tx_ques; 688 adapter->max_tx_ques = nic_info.max_tx_ques;
720 adapter->max_rx_ques = nic_info.max_rx_ques; 689 adapter->max_rx_ques = nic_info.max_rx_ques;
@@ -749,8 +718,8 @@ static void
749qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, 718qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
750 struct qlcnic_esw_func_cfg *esw_cfg) 719 struct qlcnic_esw_func_cfg *esw_cfg)
751{ 720{
752 adapter->flags &= ~QLCNIC_MACSPOOF; 721 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
753 adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED; 722 QLCNIC_PROMISC_DISABLED);
754 723
755 if (esw_cfg->mac_anti_spoof) 724 if (esw_cfg->mac_anti_spoof)
756 adapter->flags |= QLCNIC_MACSPOOF; 725 adapter->flags |= QLCNIC_MACSPOOF;
@@ -758,6 +727,9 @@ qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
758 if (!esw_cfg->mac_override) 727 if (!esw_cfg->mac_override)
759 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED; 728 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
760 729
730 if (!esw_cfg->promisc_mode)
731 adapter->flags |= QLCNIC_PROMISC_DISABLED;
732
761 qlcnic_set_netdev_features(adapter, esw_cfg); 733 qlcnic_set_netdev_features(adapter, esw_cfg);
762} 734}
763 735
@@ -876,6 +848,7 @@ qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
876 esw_cfg.pci_func = i; 848 esw_cfg.pci_func = i;
877 esw_cfg.offload_flags = BIT_0; 849 esw_cfg.offload_flags = BIT_0;
878 esw_cfg.mac_override = BIT_0; 850 esw_cfg.mac_override = BIT_0;
851 esw_cfg.promisc_mode = BIT_0;
879 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) 852 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
880 esw_cfg.offload_flags |= (BIT_1 | BIT_2); 853 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
881 if (qlcnic_config_switch_port(adapter, &esw_cfg)) 854 if (qlcnic_config_switch_port(adapter, &esw_cfg))
@@ -1023,7 +996,6 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1023 err = qlcnic_pinit_from_rom(adapter); 996 err = qlcnic_pinit_from_rom(adapter);
1024 if (err) 997 if (err)
1025 goto err_out; 998 goto err_out;
1026 qlcnic_set_port_mode(adapter);
1027 999
1028 err = qlcnic_load_firmware(adapter); 1000 err = qlcnic_load_firmware(adapter);
1029 if (err) 1001 if (err)
@@ -1446,7 +1418,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1446 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops); 1418 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1447 1419
1448 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1420 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1449 NETIF_F_IPV6_CSUM | NETIF_F_GRO); 1421 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
1450 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | 1422 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1451 NETIF_F_IPV6_CSUM); 1423 NETIF_F_IPV6_CSUM);
1452 1424
@@ -1751,7 +1723,7 @@ qlcnic_resume(struct pci_dev *pdev)
1751 if (err) 1723 if (err)
1752 goto done; 1724 goto done;
1753 1725
1754 qlcnic_config_indev_addr(netdev, NETDEV_UP); 1726 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1755 } 1727 }
1756done: 1728done:
1757 netif_device_attach(netdev); 1729 netif_device_attach(netdev);
@@ -1826,11 +1798,12 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1826} 1798}
1827 1799
1828static void qlcnic_change_filter(struct qlcnic_adapter *adapter, 1800static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1829 u64 uaddr, u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring) 1801 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1830{ 1802{
1831 struct cmd_desc_type0 *hwdesc; 1803 struct cmd_desc_type0 *hwdesc;
1832 struct qlcnic_nic_req *req; 1804 struct qlcnic_nic_req *req;
1833 struct qlcnic_mac_req *mac_req; 1805 struct qlcnic_mac_req *mac_req;
1806 struct qlcnic_vlan_req *vlan_req;
1834 u32 producer; 1807 u32 producer;
1835 u64 word; 1808 u64 word;
1836 1809
@@ -1848,7 +1821,8 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1848 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD; 1821 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1849 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN); 1822 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1850 1823
1851 req->words[1] = cpu_to_le64(vlan_id); 1824 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1825 vlan_req->vlan_id = vlan_id;
1852 1826
1853 tx_ring->producer = get_next_index(producer, tx_ring->num_desc); 1827 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1854} 1828}
@@ -1867,7 +1841,7 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
1867 struct hlist_node *tmp_hnode, *n; 1841 struct hlist_node *tmp_hnode, *n;
1868 struct hlist_head *head; 1842 struct hlist_head *head;
1869 u64 src_addr = 0; 1843 u64 src_addr = 0;
1870 u16 vlan_id = 0; 1844 __le16 vlan_id = 0;
1871 u8 hindex; 1845 u8 hindex;
1872 1846
1873 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr)) 1847 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
@@ -1920,7 +1894,8 @@ qlcnic_tso_check(struct net_device *netdev,
1920 struct vlan_ethhdr *vh; 1894 struct vlan_ethhdr *vh;
1921 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1895 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1922 u32 producer = tx_ring->producer; 1896 u32 producer = tx_ring->producer;
1923 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB); 1897 __le16 vlan_oob = first_desc->flags_opcode &
1898 cpu_to_le16(FLAGS_VLAN_OOB);
1924 1899
1925 if (*(skb->data) & BIT_0) { 1900 if (*(skb->data) & BIT_0) {
1926 flags |= BIT_0; 1901 flags |= BIT_0;
@@ -1991,7 +1966,8 @@ qlcnic_tso_check(struct net_device *netdev,
1991 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2); 1966 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1992 skb_copy_from_linear_data(skb, vh, 12); 1967 skb_copy_from_linear_data(skb, vh, 12);
1993 vh->h_vlan_proto = htons(ETH_P_8021Q); 1968 vh->h_vlan_proto = htons(ETH_P_8021Q);
1994 vh->h_vlan_TCI = htons(first_desc->vlan_TCI); 1969 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1970
1995 skb_copy_from_linear_data_offset(skb, 12, 1971 skb_copy_from_linear_data_offset(skb, 12,
1996 (char *)vh + 16, copy_len - 16); 1972 (char *)vh + 16, copy_len - 16);
1997 1973
@@ -2738,7 +2714,8 @@ qlcnic_fwinit_work(struct work_struct *work)
2738 goto err_ret; 2714 goto err_ret;
2739 2715
2740 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2716 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2741 if (dev_state == QLCNIC_DEV_QUISCENT) { 2717 if (dev_state == QLCNIC_DEV_QUISCENT ||
2718 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2742 qlcnic_api_unlock(adapter); 2719 qlcnic_api_unlock(adapter);
2743 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, 2720 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2744 FW_POLL_DELAY * 2); 2721 FW_POLL_DELAY * 2);
@@ -2760,18 +2737,6 @@ qlcnic_fwinit_work(struct work_struct *work)
2760skip_ack_check: 2737skip_ack_check:
2761 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2738 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2762 2739
2763 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2764 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2765 QLCNIC_DEV_QUISCENT);
2766 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2767 FW_POLL_DELAY * 2);
2768 QLCDB(adapter, DRV, "Quiscing the driver\n");
2769 qlcnic_idc_debug_info(adapter, 0);
2770
2771 qlcnic_api_unlock(adapter);
2772 return;
2773 }
2774
2775 if (dev_state == QLCNIC_DEV_NEED_RESET) { 2740 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2776 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, 2741 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2777 QLCNIC_DEV_INITIALIZING); 2742 QLCNIC_DEV_INITIALIZING);
@@ -2828,7 +2793,12 @@ qlcnic_detach_work(struct work_struct *work)
2828 2793
2829 netif_device_detach(netdev); 2794 netif_device_detach(netdev);
2830 2795
2831 qlcnic_down(adapter, netdev); 2796 /* Dont grab rtnl lock during Quiscent mode */
2797 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2798 if (netif_running(netdev))
2799 __qlcnic_down(adapter, netdev);
2800 } else
2801 qlcnic_down(adapter, netdev);
2832 2802
2833 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1); 2803 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2834 2804
@@ -2870,6 +2840,61 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2870 qlcnic_api_unlock(adapter); 2840 qlcnic_api_unlock(adapter);
2871} 2841}
2872 2842
2843/* Caller should held RESETTING bit.
2844 * This should be call in sync with qlcnic_request_quiscent_mode.
2845 */
2846void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
2847{
2848 qlcnic_clr_drv_state(adapter);
2849 qlcnic_api_lock(adapter);
2850 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
2851 qlcnic_api_unlock(adapter);
2852}
2853
2854/* Caller should held RESETTING bit.
2855 */
2856int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
2857{
2858 u8 timeo = adapter->dev_init_timeo / 2;
2859 u32 state;
2860
2861 if (qlcnic_api_lock(adapter))
2862 return -EIO;
2863
2864 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2865 if (state != QLCNIC_DEV_READY)
2866 return -EIO;
2867
2868 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
2869 qlcnic_api_unlock(adapter);
2870 QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
2871 qlcnic_idc_debug_info(adapter, 0);
2872
2873 qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
2874
2875 do {
2876 msleep(2000);
2877 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2878 if (state == QLCNIC_DEV_QUISCENT)
2879 return 0;
2880 if (!qlcnic_check_drv_state(adapter)) {
2881 if (qlcnic_api_lock(adapter))
2882 return -EIO;
2883 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2884 QLCNIC_DEV_QUISCENT);
2885 qlcnic_api_unlock(adapter);
2886 QLCDB(adapter, DRV, "QUISCENT mode set\n");
2887 return 0;
2888 }
2889 } while (--timeo);
2890
2891 dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
2892 " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
2893 QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
2894 qlcnic_clear_quiscent_mode(adapter);
2895 return -EIO;
2896}
2897
2873/*Transit to RESET state from READY state only */ 2898/*Transit to RESET state from READY state only */
2874static void 2899static void
2875qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2900qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -2913,7 +2938,8 @@ qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2913 return; 2938 return;
2914 2939
2915 INIT_DELAYED_WORK(&adapter->fw_work, func); 2940 INIT_DELAYED_WORK(&adapter->fw_work, func);
2916 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay)); 2941 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2942 round_jiffies_relative(delay));
2917} 2943}
2918 2944
2919static void 2945static void
@@ -2950,7 +2976,7 @@ attach:
2950 if (qlcnic_up(adapter, netdev)) 2976 if (qlcnic_up(adapter, netdev))
2951 goto done; 2977 goto done;
2952 2978
2953 qlcnic_config_indev_addr(netdev, NETDEV_UP); 2979 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2954 } 2980 }
2955 2981
2956done: 2982done:
@@ -2976,11 +3002,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2976 qlcnic_dev_request_reset(adapter); 3002 qlcnic_dev_request_reset(adapter);
2977 3003
2978 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 3004 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2979 if (state == QLCNIC_DEV_NEED_RESET || 3005 if (state == QLCNIC_DEV_NEED_RESET) {
2980 state == QLCNIC_DEV_NEED_QUISCENT) {
2981 qlcnic_set_npar_non_operational(adapter); 3006 qlcnic_set_npar_non_operational(adapter);
2982 adapter->need_fw_reset = 1; 3007 adapter->need_fw_reset = 1;
2983 } 3008 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3009 goto detach;
2984 3010
2985 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER); 3011 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2986 if (heartbeat != adapter->heartbeat) { 3012 if (heartbeat != adapter->heartbeat) {
@@ -3112,7 +3138,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
3112 if (err) 3138 if (err)
3113 goto done; 3139 goto done;
3114 3140
3115 qlcnic_config_indev_addr(netdev, NETDEV_UP); 3141 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3116 } 3142 }
3117 done: 3143 done:
3118 netif_device_attach(netdev); 3144 netif_device_attach(netdev);
@@ -3549,6 +3575,7 @@ validate_esw_config(struct qlcnic_adapter *adapter,
3549 QLCNIC_NON_PRIV_FUNC) { 3575 QLCNIC_NON_PRIV_FUNC) {
3550 esw_cfg[i].mac_anti_spoof = 0; 3576 esw_cfg[i].mac_anti_spoof = 0;
3551 esw_cfg[i].mac_override = 1; 3577 esw_cfg[i].mac_override = 1;
3578 esw_cfg[i].promisc_mode = 1;
3552 } 3579 }
3553 break; 3580 break;
3554 case QLCNIC_ADD_VLAN: 3581 case QLCNIC_ADD_VLAN:
@@ -3749,7 +3776,7 @@ qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3749 return ret; 3776 return ret;
3750 3777
3751 np_cfg[i].pci_func = i; 3778 np_cfg[i].pci_func = i;
3752 np_cfg[i].op_mode = nic_info.op_mode; 3779 np_cfg[i].op_mode = (u8)nic_info.op_mode;
3753 np_cfg[i].port_num = nic_info.phys_port; 3780 np_cfg[i].port_num = nic_info.phys_port;
3754 np_cfg[i].fw_capab = nic_info.capabilities; 3781 np_cfg[i].fw_capab = nic_info.capabilities;
3755 np_cfg[i].min_bw = nic_info.min_tx_bw ; 3782 np_cfg[i].min_bw = nic_info.min_tx_bw ;
@@ -4027,10 +4054,10 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4027#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) 4054#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4028 4055
4029static void 4056static void
4030qlcnic_config_indev_addr(struct net_device *dev, unsigned long event) 4057qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4058 struct net_device *dev, unsigned long event)
4031{ 4059{
4032 struct in_device *indev; 4060 struct in_device *indev;
4033 struct qlcnic_adapter *adapter = netdev_priv(dev);
4034 4061
4035 indev = in_dev_get(dev); 4062 indev = in_dev_get(dev);
4036 if (!indev) 4063 if (!indev)
@@ -4054,6 +4081,27 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
4054 in_dev_put(indev); 4081 in_dev_put(indev);
4055} 4082}
4056 4083
4084static void
4085qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4086{
4087 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4088 struct net_device *dev;
4089 u16 vid;
4090
4091 qlcnic_config_indev_addr(adapter, netdev, event);
4092
4093 if (!adapter->vlgrp)
4094 return;
4095
4096 for (vid = 0; vid < VLAN_N_VID; vid++) {
4097 dev = vlan_group_get_device(adapter->vlgrp, vid);
4098 if (!dev)
4099 continue;
4100
4101 qlcnic_config_indev_addr(adapter, dev, event);
4102 }
4103}
4104
4057static int qlcnic_netdev_event(struct notifier_block *this, 4105static int qlcnic_netdev_event(struct notifier_block *this,
4058 unsigned long event, void *ptr) 4106 unsigned long event, void *ptr)
4059{ 4107{
@@ -4080,7 +4128,7 @@ recheck:
4080 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 4128 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4081 goto done; 4129 goto done;
4082 4130
4083 qlcnic_config_indev_addr(dev, event); 4131 qlcnic_config_indev_addr(adapter, dev, event);
4084done: 4132done:
4085 return NOTIFY_DONE; 4133 return NOTIFY_DONE;
4086} 4134}
@@ -4097,7 +4145,7 @@ qlcnic_inetaddr_event(struct notifier_block *this,
4097 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 4145 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4098 4146
4099recheck: 4147recheck:
4100 if (dev == NULL || !netif_running(dev)) 4148 if (dev == NULL)
4101 goto done; 4149 goto done;
4102 4150
4103 if (dev->priv_flags & IFF_802_1Q_VLAN) { 4151 if (dev->priv_flags & IFF_802_1Q_VLAN) {
@@ -4140,7 +4188,7 @@ static struct notifier_block qlcnic_inetaddr_cb = {
4140}; 4188};
4141#else 4189#else
4142static void 4190static void
4143qlcnic_config_indev_addr(struct net_device *dev, unsigned long event) 4191qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4144{ } 4192{ }
4145#endif 4193#endif
4146static struct pci_error_handlers qlcnic_err_handler = { 4194static struct pci_error_handlers qlcnic_err_handler = {
@@ -4169,6 +4217,12 @@ static int __init qlcnic_init_module(void)
4169 4217
4170 printk(KERN_INFO "%s\n", qlcnic_driver_string); 4218 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4171 4219
4220 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4221 if (qlcnic_wq == NULL) {
4222 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4223 return -ENOMEM;
4224 }
4225
4172#ifdef CONFIG_INET 4226#ifdef CONFIG_INET
4173 register_netdevice_notifier(&qlcnic_netdev_cb); 4227 register_netdevice_notifier(&qlcnic_netdev_cb);
4174 register_inetaddr_notifier(&qlcnic_inetaddr_cb); 4228 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
@@ -4180,6 +4234,7 @@ static int __init qlcnic_init_module(void)
4180 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); 4234 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4181 unregister_netdevice_notifier(&qlcnic_netdev_cb); 4235 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4182#endif 4236#endif
4237 destroy_workqueue(qlcnic_wq);
4183 } 4238 }
4184 4239
4185 return ret; 4240 return ret;
@@ -4196,6 +4251,7 @@ static void __exit qlcnic_exit_module(void)
4196 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb); 4251 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4197 unregister_netdevice_notifier(&qlcnic_netdev_cb); 4252 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4198#endif 4253#endif
4254 destroy_workqueue(qlcnic_wq);
4199} 4255}
4200 4256
4201module_exit(qlcnic_exit_module); 4257module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4ffebe83d883..ba0053d8515e 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -2572,7 +2572,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2572 2572
2573 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); 2573 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2574 2574
2575 if (qdev->vlgrp && vlan_tx_tag_present(skb)) { 2575 if (vlan_tx_tag_present(skb)) {
2576 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, 2576 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2577 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); 2577 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2578 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; 2578 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 63db065508f4..68a84198eb05 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -745,6 +745,9 @@ static void r6040_mac_address(struct net_device *dev)
745 iowrite16(adrp[0], ioaddr + MID_0L); 745 iowrite16(adrp[0], ioaddr + MID_0L);
746 iowrite16(adrp[1], ioaddr + MID_0M); 746 iowrite16(adrp[1], ioaddr + MID_0M);
747 iowrite16(adrp[2], ioaddr + MID_0H); 747 iowrite16(adrp[2], ioaddr + MID_0H);
748
749 /* Store MAC Address in perm_addr */
750 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
748} 751}
749 752
750static int r6040_open(struct net_device *dev) 753static int r6040_open(struct net_device *dev)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 54900332f12d..d88ce9fb1cbd 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -187,12 +187,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
187 187
188MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); 188MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
189 189
190/* 190static int rx_buf_sz = 16383;
191 * we set our copybreak very high so that we don't have
192 * to allocate 16k frames all the time (see note in
193 * rtl8169_open()
194 */
195static int rx_copybreak = 16383;
196static int use_dac; 191static int use_dac;
197static struct { 192static struct {
198 u32 msg_enable; 193 u32 msg_enable;
@@ -484,10 +479,8 @@ struct rtl8169_private {
484 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ 479 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
485 dma_addr_t TxPhyAddr; 480 dma_addr_t TxPhyAddr;
486 dma_addr_t RxPhyAddr; 481 dma_addr_t RxPhyAddr;
487 struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ 482 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
488 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ 483 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
489 unsigned align;
490 unsigned rx_buf_sz;
491 struct timer_list timer; 484 struct timer_list timer;
492 u16 cp_cmd; 485 u16 cp_cmd;
493 u16 intr_event; 486 u16 intr_event;
@@ -515,8 +508,6 @@ struct rtl8169_private {
515 508
516MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 509MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
517MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); 510MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
518module_param(rx_copybreak, int, 0);
519MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
520module_param(use_dac, int, 0); 511module_param(use_dac, int, 0);
521MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); 512MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
522module_param_named(debug, debug.msg_enable, int, 0); 513module_param_named(debug, debug.msg_enable, int, 0);
@@ -1043,7 +1034,7 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
1043static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, 1034static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1044 struct sk_buff *skb) 1035 struct sk_buff *skb)
1045{ 1036{
1046 return (tp->vlgrp && vlan_tx_tag_present(skb)) ? 1037 return (vlan_tx_tag_present(skb)) ?
1047 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1038 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1048} 1039}
1049 1040
@@ -1209,6 +1200,7 @@ static void rtl8169_update_counters(struct net_device *dev)
1209 dma_addr_t paddr; 1200 dma_addr_t paddr;
1210 u32 cmd; 1201 u32 cmd;
1211 int wait = 1000; 1202 int wait = 1000;
1203 struct device *d = &tp->pci_dev->dev;
1212 1204
1213 /* 1205 /*
1214 * Some chips are unable to dump tally counters when the receiver 1206 * Some chips are unable to dump tally counters when the receiver
@@ -1217,7 +1209,7 @@ static void rtl8169_update_counters(struct net_device *dev)
1217 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) 1209 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1218 return; 1210 return;
1219 1211
1220 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); 1212 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
1221 if (!counters) 1213 if (!counters)
1222 return; 1214 return;
1223 1215
@@ -1238,7 +1230,7 @@ static void rtl8169_update_counters(struct net_device *dev)
1238 RTL_W32(CounterAddrLow, 0); 1230 RTL_W32(CounterAddrLow, 0);
1239 RTL_W32(CounterAddrHigh, 0); 1231 RTL_W32(CounterAddrHigh, 0);
1240 1232
1241 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); 1233 dma_free_coherent(d, sizeof(*counters), counters, paddr);
1242} 1234}
1243 1235
1244static void rtl8169_get_ethtool_stats(struct net_device *dev, 1236static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -2939,7 +2931,7 @@ static const struct rtl_cfg_info {
2939 .hw_start = rtl_hw_start_8168, 2931 .hw_start = rtl_hw_start_8168,
2940 .region = 2, 2932 .region = 2,
2941 .align = 8, 2933 .align = 8,
2942 .intr_event = SYSErr | LinkChg | RxOverflow | 2934 .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
2943 TxErr | TxOK | RxOK | RxErr, 2935 TxErr | TxOK | RxOK | RxErr,
2944 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2936 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
2945 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, 2937 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3194,7 +3186,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3194 dev->features |= NETIF_F_GRO; 3186 dev->features |= NETIF_F_GRO;
3195 3187
3196 tp->intr_mask = 0xffff; 3188 tp->intr_mask = 0xffff;
3197 tp->align = cfg->align;
3198 tp->hw_start = cfg->hw_start; 3189 tp->hw_start = cfg->hw_start;
3199 tp->intr_event = cfg->intr_event; 3190 tp->intr_event = cfg->intr_event;
3200 tp->napi_event = cfg->napi_event; 3191 tp->napi_event = cfg->napi_event;
@@ -3264,18 +3255,6 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3264 pci_set_drvdata(pdev, NULL); 3255 pci_set_drvdata(pdev, NULL);
3265} 3256}
3266 3257
3267static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3268 unsigned int mtu)
3269{
3270 unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3271
3272 if (max_frame != 16383)
3273 printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
3274 "NIC may lead to frame reception errors!\n");
3275
3276 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3277}
3278
3279static int rtl8169_open(struct net_device *dev) 3258static int rtl8169_open(struct net_device *dev)
3280{ 3259{
3281 struct rtl8169_private *tp = netdev_priv(dev); 3260 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3285,28 +3264,16 @@ static int rtl8169_open(struct net_device *dev)
3285 pm_runtime_get_sync(&pdev->dev); 3264 pm_runtime_get_sync(&pdev->dev);
3286 3265
3287 /* 3266 /*
3288 * Note that we use a magic value here, its wierd I know
3289 * its done because, some subset of rtl8169 hardware suffers from
3290 * a problem in which frames received that are longer than
3291 * the size set in RxMaxSize register return garbage sizes
3292 * when received. To avoid this we need to turn off filtering,
3293 * which is done by setting a value of 16383 in the RxMaxSize register
3294 * and allocating 16k frames to handle the largest possible rx value
3295 * thats what the magic math below does.
3296 */
3297 rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
3298
3299 /*
3300 * Rx and Tx desscriptors needs 256 bytes alignment. 3267 * Rx and Tx desscriptors needs 256 bytes alignment.
3301 * pci_alloc_consistent provides more. 3268 * dma_alloc_coherent provides more.
3302 */ 3269 */
3303 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, 3270 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
3304 &tp->TxPhyAddr); 3271 &tp->TxPhyAddr, GFP_KERNEL);
3305 if (!tp->TxDescArray) 3272 if (!tp->TxDescArray)
3306 goto err_pm_runtime_put; 3273 goto err_pm_runtime_put;
3307 3274
3308 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, 3275 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
3309 &tp->RxPhyAddr); 3276 &tp->RxPhyAddr, GFP_KERNEL);
3310 if (!tp->RxDescArray) 3277 if (!tp->RxDescArray)
3311 goto err_free_tx_0; 3278 goto err_free_tx_0;
3312 3279
@@ -3340,12 +3307,12 @@ out:
3340err_release_ring_2: 3307err_release_ring_2:
3341 rtl8169_rx_clear(tp); 3308 rtl8169_rx_clear(tp);
3342err_free_rx_1: 3309err_free_rx_1:
3343 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 3310 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
3344 tp->RxPhyAddr); 3311 tp->RxPhyAddr);
3345 tp->RxDescArray = NULL; 3312 tp->RxDescArray = NULL;
3346err_free_tx_0: 3313err_free_tx_0:
3347 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 3314 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
3348 tp->TxPhyAddr); 3315 tp->TxPhyAddr);
3349 tp->TxDescArray = NULL; 3316 tp->TxDescArray = NULL;
3350err_pm_runtime_put: 3317err_pm_runtime_put:
3351 pm_runtime_put_noidle(&pdev->dev); 3318 pm_runtime_put_noidle(&pdev->dev);
@@ -3472,7 +3439,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
3472 3439
3473 RTL_W8(EarlyTxThres, EarlyTxThld); 3440 RTL_W8(EarlyTxThres, EarlyTxThld);
3474 3441
3475 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); 3442 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3476 3443
3477 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) || 3444 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
3478 (tp->mac_version == RTL_GIGA_MAC_VER_02) || 3445 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
@@ -3733,7 +3700,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
3733 3700
3734 RTL_W8(EarlyTxThres, EarlyTxThld); 3701 RTL_W8(EarlyTxThres, EarlyTxThld);
3735 3702
3736 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); 3703 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3737 3704
3738 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; 3705 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
3739 3706
@@ -3913,7 +3880,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
3913 3880
3914 RTL_W8(EarlyTxThres, EarlyTxThld); 3881 RTL_W8(EarlyTxThres, EarlyTxThld);
3915 3882
3916 rtl_set_rx_max_size(ioaddr, tp->rx_buf_sz); 3883 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3917 3884
3918 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 3885 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
3919 3886
@@ -3941,33 +3908,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
3941 3908
3942static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) 3909static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3943{ 3910{
3944 struct rtl8169_private *tp = netdev_priv(dev);
3945 int ret = 0;
3946
3947 if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu) 3911 if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
3948 return -EINVAL; 3912 return -EINVAL;
3949 3913
3950 dev->mtu = new_mtu; 3914 dev->mtu = new_mtu;
3951 3915 return 0;
3952 if (!netif_running(dev))
3953 goto out;
3954
3955 rtl8169_down(dev);
3956
3957 rtl8169_set_rxbufsize(tp, dev->mtu);
3958
3959 ret = rtl8169_init_ring(dev);
3960 if (ret < 0)
3961 goto out;
3962
3963 napi_enable(&tp->napi);
3964
3965 rtl_hw_start(dev);
3966
3967 rtl8169_request_timer(dev);
3968
3969out:
3970 return ret;
3971} 3916}
3972 3917
3973static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) 3918static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
@@ -3976,15 +3921,14 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
3976 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); 3921 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
3977} 3922}
3978 3923
3979static void rtl8169_free_rx_skb(struct rtl8169_private *tp, 3924static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
3980 struct sk_buff **sk_buff, struct RxDesc *desc) 3925 void **data_buff, struct RxDesc *desc)
3981{ 3926{
3982 struct pci_dev *pdev = tp->pci_dev; 3927 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
3928 DMA_FROM_DEVICE);
3983 3929
3984 pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz, 3930 kfree(*data_buff);
3985 PCI_DMA_FROMDEVICE); 3931 *data_buff = NULL;
3986 dev_kfree_skb(*sk_buff);
3987 *sk_buff = NULL;
3988 rtl8169_make_unusable_by_asic(desc); 3932 rtl8169_make_unusable_by_asic(desc);
3989} 3933}
3990 3934
@@ -4003,33 +3947,45 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
4003 rtl8169_mark_to_asic(desc, rx_buf_sz); 3947 rtl8169_mark_to_asic(desc, rx_buf_sz);
4004} 3948}
4005 3949
4006static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, 3950static inline void *rtl8169_align(void *data)
4007 struct net_device *dev,
4008 struct RxDesc *desc, int rx_buf_sz,
4009 unsigned int align)
4010{ 3951{
4011 struct sk_buff *skb; 3952 return (void *)ALIGN((long)data, 16);
4012 dma_addr_t mapping; 3953}
4013 unsigned int pad;
4014 3954
4015 pad = align ? align : NET_IP_ALIGN; 3955static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
3956 struct RxDesc *desc)
3957{
3958 void *data;
3959 dma_addr_t mapping;
3960 struct device *d = &tp->pci_dev->dev;
3961 struct net_device *dev = tp->dev;
3962 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
4016 3963
4017 skb = netdev_alloc_skb(dev, rx_buf_sz + pad); 3964 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
4018 if (!skb) 3965 if (!data)
4019 goto err_out; 3966 return NULL;
4020 3967
4021 skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad); 3968 if (rtl8169_align(data) != data) {
3969 kfree(data);
3970 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
3971 if (!data)
3972 return NULL;
3973 }
4022 3974
4023 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 3975 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
4024 PCI_DMA_FROMDEVICE); 3976 DMA_FROM_DEVICE);
3977 if (unlikely(dma_mapping_error(d, mapping))) {
3978 if (net_ratelimit())
3979 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
3980 goto err_out;
3981 }
4025 3982
4026 rtl8169_map_to_asic(desc, mapping, rx_buf_sz); 3983 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
4027out: 3984 return data;
4028 return skb;
4029 3985
4030err_out: 3986err_out:
4031 rtl8169_make_unusable_by_asic(desc); 3987 kfree(data);
4032 goto out; 3988 return NULL;
4033} 3989}
4034 3990
4035static void rtl8169_rx_clear(struct rtl8169_private *tp) 3991static void rtl8169_rx_clear(struct rtl8169_private *tp)
@@ -4037,41 +3993,42 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp)
4037 unsigned int i; 3993 unsigned int i;
4038 3994
4039 for (i = 0; i < NUM_RX_DESC; i++) { 3995 for (i = 0; i < NUM_RX_DESC; i++) {
4040 if (tp->Rx_skbuff[i]) { 3996 if (tp->Rx_databuff[i]) {
4041 rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i, 3997 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
4042 tp->RxDescArray + i); 3998 tp->RxDescArray + i);
4043 } 3999 }
4044 } 4000 }
4045} 4001}
4046 4002
4047static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, 4003static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
4048 u32 start, u32 end)
4049{ 4004{
4050 u32 cur; 4005 desc->opts1 |= cpu_to_le32(RingEnd);
4006}
4051 4007
4052 for (cur = start; end - cur != 0; cur++) { 4008static int rtl8169_rx_fill(struct rtl8169_private *tp)
4053 struct sk_buff *skb; 4009{
4054 unsigned int i = cur % NUM_RX_DESC; 4010 unsigned int i;
4055 4011
4056 WARN_ON((s32)(end - cur) < 0); 4012 for (i = 0; i < NUM_RX_DESC; i++) {
4013 void *data;
4057 4014
4058 if (tp->Rx_skbuff[i]) 4015 if (tp->Rx_databuff[i])
4059 continue; 4016 continue;
4060 4017
4061 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, 4018 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
4062 tp->RxDescArray + i, 4019 if (!data) {
4063 tp->rx_buf_sz, tp->align); 4020 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
4064 if (!skb) 4021 goto err_out;
4065 break; 4022 }
4066 4023 tp->Rx_databuff[i] = data;
4067 tp->Rx_skbuff[i] = skb;
4068 } 4024 }
4069 return cur - start;
4070}
4071 4025
4072static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) 4026 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
4073{ 4027 return 0;
4074 desc->opts1 |= cpu_to_le32(RingEnd); 4028
4029err_out:
4030 rtl8169_rx_clear(tp);
4031 return -ENOMEM;
4075} 4032}
4076 4033
4077static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) 4034static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
@@ -4086,53 +4043,51 @@ static int rtl8169_init_ring(struct net_device *dev)
4086 rtl8169_init_ring_indexes(tp); 4043 rtl8169_init_ring_indexes(tp);
4087 4044
4088 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); 4045 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
4089 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); 4046 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
4090
4091 if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
4092 goto err_out;
4093
4094 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
4095
4096 return 0;
4097 4047
4098err_out: 4048 return rtl8169_rx_fill(tp);
4099 rtl8169_rx_clear(tp);
4100 return -ENOMEM;
4101} 4049}
4102 4050
4103static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb, 4051static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
4104 struct TxDesc *desc) 4052 struct TxDesc *desc)
4105{ 4053{
4106 unsigned int len = tx_skb->len; 4054 unsigned int len = tx_skb->len;
4107 4055
4108 pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); 4056 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
4057
4109 desc->opts1 = 0x00; 4058 desc->opts1 = 0x00;
4110 desc->opts2 = 0x00; 4059 desc->opts2 = 0x00;
4111 desc->addr = 0x00; 4060 desc->addr = 0x00;
4112 tx_skb->len = 0; 4061 tx_skb->len = 0;
4113} 4062}
4114 4063
4115static void rtl8169_tx_clear(struct rtl8169_private *tp) 4064static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
4065 unsigned int n)
4116{ 4066{
4117 unsigned int i; 4067 unsigned int i;
4118 4068
4119 for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) { 4069 for (i = 0; i < n; i++) {
4120 unsigned int entry = i % NUM_TX_DESC; 4070 unsigned int entry = (start + i) % NUM_TX_DESC;
4121 struct ring_info *tx_skb = tp->tx_skb + entry; 4071 struct ring_info *tx_skb = tp->tx_skb + entry;
4122 unsigned int len = tx_skb->len; 4072 unsigned int len = tx_skb->len;
4123 4073
4124 if (len) { 4074 if (len) {
4125 struct sk_buff *skb = tx_skb->skb; 4075 struct sk_buff *skb = tx_skb->skb;
4126 4076
4127 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, 4077 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
4128 tp->TxDescArray + entry); 4078 tp->TxDescArray + entry);
4129 if (skb) { 4079 if (skb) {
4080 tp->dev->stats.tx_dropped++;
4130 dev_kfree_skb(skb); 4081 dev_kfree_skb(skb);
4131 tx_skb->skb = NULL; 4082 tx_skb->skb = NULL;
4132 } 4083 }
4133 tp->dev->stats.tx_dropped++;
4134 } 4084 }
4135 } 4085 }
4086}
4087
4088static void rtl8169_tx_clear(struct rtl8169_private *tp)
4089{
4090 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
4136 tp->cur_tx = tp->dirty_tx = 0; 4091 tp->cur_tx = tp->dirty_tx = 0;
4137} 4092}
4138 4093
@@ -4236,6 +4191,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4236 struct skb_shared_info *info = skb_shinfo(skb); 4191 struct skb_shared_info *info = skb_shinfo(skb);
4237 unsigned int cur_frag, entry; 4192 unsigned int cur_frag, entry;
4238 struct TxDesc * uninitialized_var(txd); 4193 struct TxDesc * uninitialized_var(txd);
4194 struct device *d = &tp->pci_dev->dev;
4239 4195
4240 entry = tp->cur_tx; 4196 entry = tp->cur_tx;
4241 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { 4197 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
@@ -4249,7 +4205,13 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4249 txd = tp->TxDescArray + entry; 4205 txd = tp->TxDescArray + entry;
4250 len = frag->size; 4206 len = frag->size;
4251 addr = ((void *) page_address(frag->page)) + frag->page_offset; 4207 addr = ((void *) page_address(frag->page)) + frag->page_offset;
4252 mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE); 4208 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
4209 if (unlikely(dma_mapping_error(d, mapping))) {
4210 if (net_ratelimit())
4211 netif_err(tp, drv, tp->dev,
4212 "Failed to map TX fragments DMA!\n");
4213 goto err_out;
4214 }
4253 4215
4254 /* anti gcc 2.95.3 bugware (sic) */ 4216 /* anti gcc 2.95.3 bugware (sic) */
4255 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); 4217 status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@ -4266,6 +4228,10 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4266 } 4228 }
4267 4229
4268 return cur_frag; 4230 return cur_frag;
4231
4232err_out:
4233 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
4234 return -EIO;
4269} 4235}
4270 4236
4271static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev) 4237static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
@@ -4292,39 +4258,47 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4292 struct net_device *dev) 4258 struct net_device *dev)
4293{ 4259{
4294 struct rtl8169_private *tp = netdev_priv(dev); 4260 struct rtl8169_private *tp = netdev_priv(dev);
4295 unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC; 4261 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
4296 struct TxDesc *txd = tp->TxDescArray + entry; 4262 struct TxDesc *txd = tp->TxDescArray + entry;
4297 void __iomem *ioaddr = tp->mmio_addr; 4263 void __iomem *ioaddr = tp->mmio_addr;
4264 struct device *d = &tp->pci_dev->dev;
4298 dma_addr_t mapping; 4265 dma_addr_t mapping;
4299 u32 status, len; 4266 u32 status, len;
4300 u32 opts1; 4267 u32 opts1;
4268 int frags;
4301 4269
4302 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 4270 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
4303 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 4271 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
4304 goto err_stop; 4272 goto err_stop_0;
4305 } 4273 }
4306 4274
4307 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) 4275 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
4308 goto err_stop; 4276 goto err_stop_0;
4277
4278 len = skb_headlen(skb);
4279 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
4280 if (unlikely(dma_mapping_error(d, mapping))) {
4281 if (net_ratelimit())
4282 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
4283 goto err_dma_0;
4284 }
4285
4286 tp->tx_skb[entry].len = len;
4287 txd->addr = cpu_to_le64(mapping);
4288 txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
4309 4289
4310 opts1 = DescOwn | rtl8169_tso_csum(skb, dev); 4290 opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
4311 4291
4312 frags = rtl8169_xmit_frags(tp, skb, opts1); 4292 frags = rtl8169_xmit_frags(tp, skb, opts1);
4313 if (frags) { 4293 if (frags < 0)
4314 len = skb_headlen(skb); 4294 goto err_dma_1;
4295 else if (frags)
4315 opts1 |= FirstFrag; 4296 opts1 |= FirstFrag;
4316 } else { 4297 else {
4317 len = skb->len;
4318 opts1 |= FirstFrag | LastFrag; 4298 opts1 |= FirstFrag | LastFrag;
4319 tp->tx_skb[entry].skb = skb; 4299 tp->tx_skb[entry].skb = skb;
4320 } 4300 }
4321 4301
4322 mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
4323
4324 tp->tx_skb[entry].len = len;
4325 txd->addr = cpu_to_le64(mapping);
4326 txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
4327
4328 wmb(); 4302 wmb();
4329 4303
4330 /* anti gcc 2.95.3 bugware (sic) */ 4304 /* anti gcc 2.95.3 bugware (sic) */
@@ -4346,7 +4320,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4346 4320
4347 return NETDEV_TX_OK; 4321 return NETDEV_TX_OK;
4348 4322
4349err_stop: 4323err_dma_1:
4324 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
4325err_dma_0:
4326 dev_kfree_skb(skb);
4327 dev->stats.tx_dropped++;
4328 return NETDEV_TX_OK;
4329
4330err_stop_0:
4350 netif_stop_queue(dev); 4331 netif_stop_queue(dev);
4351 dev->stats.tx_dropped++; 4332 dev->stats.tx_dropped++;
4352 return NETDEV_TX_BUSY; 4333 return NETDEV_TX_BUSY;
@@ -4411,7 +4392,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
4411 while (tx_left > 0) { 4392 while (tx_left > 0) {
4412 unsigned int entry = dirty_tx % NUM_TX_DESC; 4393 unsigned int entry = dirty_tx % NUM_TX_DESC;
4413 struct ring_info *tx_skb = tp->tx_skb + entry; 4394 struct ring_info *tx_skb = tp->tx_skb + entry;
4414 u32 len = tx_skb->len;
4415 u32 status; 4395 u32 status;
4416 4396
4417 rmb(); 4397 rmb();
@@ -4419,12 +4399,11 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
4419 if (status & DescOwn) 4399 if (status & DescOwn)
4420 break; 4400 break;
4421 4401
4422 dev->stats.tx_bytes += len; 4402 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
4423 dev->stats.tx_packets++; 4403 tp->TxDescArray + entry);
4424
4425 rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb, tp->TxDescArray + entry);
4426
4427 if (status & LastFrag) { 4404 if (status & LastFrag) {
4405 dev->stats.tx_packets++;
4406 dev->stats.tx_bytes += tx_skb->skb->len;
4428 dev_kfree_skb(tx_skb->skb); 4407 dev_kfree_skb(tx_skb->skb);
4429 tx_skb->skb = NULL; 4408 tx_skb->skb = NULL;
4430 } 4409 }
@@ -4468,27 +4447,23 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4468 skb_checksum_none_assert(skb); 4447 skb_checksum_none_assert(skb);
4469} 4448}
4470 4449
4471static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, 4450static struct sk_buff *rtl8169_try_rx_copy(void *data,
4472 struct rtl8169_private *tp, int pkt_size, 4451 struct rtl8169_private *tp,
4473 dma_addr_t addr) 4452 int pkt_size,
4453 dma_addr_t addr)
4474{ 4454{
4475 struct sk_buff *skb; 4455 struct sk_buff *skb;
4476 bool done = false; 4456 struct device *d = &tp->pci_dev->dev;
4477
4478 if (pkt_size >= rx_copybreak)
4479 goto out;
4480 4457
4458 data = rtl8169_align(data);
4459 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
4460 prefetch(data);
4481 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); 4461 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
4482 if (!skb) 4462 if (skb)
4483 goto out; 4463 memcpy(skb->data, data, pkt_size);
4464 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
4484 4465
4485 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, 4466 return skb;
4486 PCI_DMA_FROMDEVICE);
4487 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
4488 *sk_buff = skb;
4489 done = true;
4490out:
4491 return done;
4492} 4467}
4493 4468
4494/* 4469/*
@@ -4503,7 +4478,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4503 void __iomem *ioaddr, u32 budget) 4478 void __iomem *ioaddr, u32 budget)
4504{ 4479{
4505 unsigned int cur_rx, rx_left; 4480 unsigned int cur_rx, rx_left;
4506 unsigned int delta, count; 4481 unsigned int count;
4507 int polling = (budget != ~(u32)0) ? 1 : 0; 4482 int polling = (budget != ~(u32)0) ? 1 : 0;
4508 4483
4509 cur_rx = tp->cur_rx; 4484 cur_rx = tp->cur_rx;
@@ -4532,12 +4507,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4532 rtl8169_schedule_work(dev, rtl8169_reset_task); 4507 rtl8169_schedule_work(dev, rtl8169_reset_task);
4533 dev->stats.rx_fifo_errors++; 4508 dev->stats.rx_fifo_errors++;
4534 } 4509 }
4535 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 4510 rtl8169_mark_to_asic(desc, rx_buf_sz);
4536 } else { 4511 } else {
4537 struct sk_buff *skb = tp->Rx_skbuff[entry]; 4512 struct sk_buff *skb;
4538 dma_addr_t addr = le64_to_cpu(desc->addr); 4513 dma_addr_t addr = le64_to_cpu(desc->addr);
4539 int pkt_size = (status & 0x00001FFF) - 4; 4514 int pkt_size = (status & 0x00001FFF) - 4;
4540 struct pci_dev *pdev = tp->pci_dev;
4541 4515
4542 /* 4516 /*
4543 * The driver does not support incoming fragmented 4517 * The driver does not support incoming fragmented
@@ -4547,18 +4521,16 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4547 if (unlikely(rtl8169_fragmented_frame(status))) { 4521 if (unlikely(rtl8169_fragmented_frame(status))) {
4548 dev->stats.rx_dropped++; 4522 dev->stats.rx_dropped++;
4549 dev->stats.rx_length_errors++; 4523 dev->stats.rx_length_errors++;
4550 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 4524 rtl8169_mark_to_asic(desc, rx_buf_sz);
4551 continue; 4525 continue;
4552 } 4526 }
4553 4527
4554 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { 4528 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
4555 pci_dma_sync_single_for_device(pdev, addr, 4529 tp, pkt_size, addr);
4556 pkt_size, PCI_DMA_FROMDEVICE); 4530 rtl8169_mark_to_asic(desc, rx_buf_sz);
4557 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 4531 if (!skb) {
4558 } else { 4532 dev->stats.rx_dropped++;
4559 pci_unmap_single(pdev, addr, tp->rx_buf_sz, 4533 continue;
4560 PCI_DMA_FROMDEVICE);
4561 tp->Rx_skbuff[entry] = NULL;
4562 } 4534 }
4563 4535
4564 rtl8169_rx_csum(skb, status); 4536 rtl8169_rx_csum(skb, status);
@@ -4587,20 +4559,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4587 count = cur_rx - tp->cur_rx; 4559 count = cur_rx - tp->cur_rx;
4588 tp->cur_rx = cur_rx; 4560 tp->cur_rx = cur_rx;
4589 4561
4590 delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); 4562 tp->dirty_rx += count;
4591 if (!delta && count)
4592 netif_info(tp, intr, dev, "no Rx buffer allocated\n");
4593 tp->dirty_rx += delta;
4594
4595 /*
4596 * FIXME: until there is periodic timer to try and refill the ring,
4597 * a temporary shortage may definitely kill the Rx process.
4598 * - disable the asic to try and avoid an overflow and kick it again
4599 * after refill ?
4600 * - how do others driver handle this condition (Uh oh...).
4601 */
4602 if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
4603 netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
4604 4563
4605 return count; 4564 return count;
4606} 4565}
@@ -4629,8 +4588,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4629 } 4588 }
4630 4589
4631 /* Work around for rx fifo overflow */ 4590 /* Work around for rx fifo overflow */
4632 if (unlikely(status & RxFIFOOver) && 4591 if (unlikely(status & RxFIFOOver)) {
4633 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4634 netif_stop_queue(dev); 4592 netif_stop_queue(dev);
4635 rtl8169_tx_timeout(dev); 4593 rtl8169_tx_timeout(dev);
4636 break; 4594 break;
@@ -4716,7 +4674,6 @@ static void rtl8169_down(struct net_device *dev)
4716{ 4674{
4717 struct rtl8169_private *tp = netdev_priv(dev); 4675 struct rtl8169_private *tp = netdev_priv(dev);
4718 void __iomem *ioaddr = tp->mmio_addr; 4676 void __iomem *ioaddr = tp->mmio_addr;
4719 unsigned int intrmask;
4720 4677
4721 rtl8169_delete_timer(dev); 4678 rtl8169_delete_timer(dev);
4722 4679
@@ -4724,11 +4681,14 @@ static void rtl8169_down(struct net_device *dev)
4724 4681
4725 napi_disable(&tp->napi); 4682 napi_disable(&tp->napi);
4726 4683
4727core_down:
4728 spin_lock_irq(&tp->lock); 4684 spin_lock_irq(&tp->lock);
4729 4685
4730 rtl8169_asic_down(ioaddr); 4686 rtl8169_asic_down(ioaddr);
4731 4687 /*
4688 * At this point device interrupts can not be enabled in any function,
4689 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
4690 * rtl8169_reinit_task) and napi is disabled (rtl8169_poll).
4691 */
4732 rtl8169_rx_missed(dev, ioaddr); 4692 rtl8169_rx_missed(dev, ioaddr);
4733 4693
4734 spin_unlock_irq(&tp->lock); 4694 spin_unlock_irq(&tp->lock);
@@ -4738,23 +4698,6 @@ core_down:
4738 /* Give a racing hard_start_xmit a few cycles to complete. */ 4698 /* Give a racing hard_start_xmit a few cycles to complete. */
4739 synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ 4699 synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
4740 4700
4741 /*
4742 * And now for the 50k$ question: are IRQ disabled or not ?
4743 *
4744 * Two paths lead here:
4745 * 1) dev->close
4746 * -> netif_running() is available to sync the current code and the
4747 * IRQ handler. See rtl8169_interrupt for details.
4748 * 2) dev->change_mtu
4749 * -> rtl8169_poll can not be issued again and re-enable the
4750 * interruptions. Let's simply issue the IRQ down sequence again.
4751 *
4752 * No loop if hotpluged or major error (0xffff).
4753 */
4754 intrmask = RTL_R16(IntrMask);
4755 if (intrmask && (intrmask != 0xffff))
4756 goto core_down;
4757
4758 rtl8169_tx_clear(tp); 4701 rtl8169_tx_clear(tp);
4759 4702
4760 rtl8169_rx_clear(tp); 4703 rtl8169_rx_clear(tp);
@@ -4774,10 +4717,10 @@ static int rtl8169_close(struct net_device *dev)
4774 4717
4775 free_irq(dev->irq, dev); 4718 free_irq(dev->irq, dev);
4776 4719
4777 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 4720 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4778 tp->RxPhyAddr); 4721 tp->RxPhyAddr);
4779 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 4722 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4780 tp->TxPhyAddr); 4723 tp->TxPhyAddr);
4781 tp->TxDescArray = NULL; 4724 tp->TxDescArray = NULL;
4782 tp->RxDescArray = NULL; 4725 tp->RxDescArray = NULL;
4783 4726
@@ -4891,6 +4834,9 @@ static int rtl8169_resume(struct device *device)
4891{ 4834{
4892 struct pci_dev *pdev = to_pci_dev(device); 4835 struct pci_dev *pdev = to_pci_dev(device);
4893 struct net_device *dev = pci_get_drvdata(pdev); 4836 struct net_device *dev = pci_get_drvdata(pdev);
4837 struct rtl8169_private *tp = netdev_priv(dev);
4838
4839 rtl8169_init_phy(dev, tp);
4894 4840
4895 if (netif_running(dev)) 4841 if (netif_running(dev))
4896 __rtl8169_resume(dev); 4842 __rtl8169_resume(dev);
@@ -4931,6 +4877,8 @@ static int rtl8169_runtime_resume(struct device *device)
4931 tp->saved_wolopts = 0; 4877 tp->saved_wolopts = 0;
4932 spin_unlock_irq(&tp->lock); 4878 spin_unlock_irq(&tp->lock);
4933 4879
4880 rtl8169_init_phy(dev, tp);
4881
4934 __rtl8169_resume(dev); 4882 __rtl8169_resume(dev);
4935 4883
4936 return 0; 4884 return 0;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884ff982..44150f2f7bfd 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
385 __ilog2(sizeof(void *)) + 4 : 0); 385 __ilog2(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev); 386 unregister_netdev(ndev);
387 kfree(ndev); 387 free_netdev(ndev);
388 388
389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
390 list_del(&peer->node); 390 list_del(&peer->node);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c70ad515383a..ecc25aab896a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -4101,7 +4101,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4101 } 4101 }
4102 4102
4103 queue = 0; 4103 queue = 0;
4104 if (sp->vlgrp && vlan_tx_tag_present(skb)) 4104 if (vlan_tx_tag_present(skb))
4105 vlan_tag = vlan_tx_tag_get(skb); 4105 vlan_tag = vlan_tx_tag_get(skb);
4106 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) { 4106 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4107 if (skb->protocol == htons(ETH_P_IP)) { 4107 if (skb->protocol == htons(ETH_P_IP)) {
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 1047b19c60a5..ab31c7124db1 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,7 +1,8 @@
1sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o \ 1sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
2 falcon_gmac.o falcon_xmac.o mcdi_mac.o \ 2 falcon_xmac.o mcdi_mac.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o falcon_boards.o mcdi.o mcdi_phy.o 4 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o
5sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
6 7
7obj-$(CONFIG_SFC) += sfc.o 8obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f702f1fb63b6..05df20e47976 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -68,14 +68,6 @@ const char *efx_loopback_mode_names[] = {
68 [LOOPBACK_PHYXS_WS] = "PHYXS_WS", 68 [LOOPBACK_PHYXS_WS] = "PHYXS_WS",
69}; 69};
70 70
71/* Interrupt mode names (see INT_MODE())) */
72const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
73const char *efx_interrupt_mode_names[] = {
74 [EFX_INT_MODE_MSIX] = "MSI-X",
75 [EFX_INT_MODE_MSI] = "MSI",
76 [EFX_INT_MODE_LEGACY] = "legacy",
77};
78
79const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 71const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
80const char *efx_reset_type_names[] = { 72const char *efx_reset_type_names[] = {
81 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 73 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
@@ -124,10 +116,11 @@ MODULE_PARM_DESC(separate_tx_channels,
124static int napi_weight = 64; 116static int napi_weight = 64;
125 117
126/* This is the time (in jiffies) between invocations of the hardware 118/* This is the time (in jiffies) between invocations of the hardware
127 * monitor, which checks for known hardware bugs and resets the 119 * monitor. On Falcon-based NICs, this will:
128 * hardware and driver as necessary. 120 * - Check the on-board hardware monitor;
121 * - Poll the link state and reconfigure the hardware as necessary.
129 */ 122 */
130unsigned int efx_monitor_interval = 1 * HZ; 123static unsigned int efx_monitor_interval = 1 * HZ;
131 124
132/* This controls whether or not the driver will initialise devices 125/* This controls whether or not the driver will initialise devices
133 * with invalid MAC addresses stored in the EEPROM or flash. If true, 126 * with invalid MAC addresses stored in the EEPROM or flash. If true,
@@ -1314,7 +1307,8 @@ static int efx_probe_nic(struct efx_nic *efx)
1314 efx->rx_indir_table[i] = i % efx->n_rx_channels; 1307 efx->rx_indir_table[i] = i % efx->n_rx_channels;
1315 1308
1316 efx_set_channels(efx); 1309 efx_set_channels(efx);
1317 efx->net_dev->real_num_tx_queues = efx->n_tx_channels; 1310 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1311 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1318 1312
1319 /* Initialise the interrupt moderation settings */ 1313 /* Initialise the interrupt moderation settings */
1320 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1314 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1357,8 +1351,17 @@ static int efx_probe_all(struct efx_nic *efx)
1357 if (rc) 1351 if (rc)
1358 goto fail3; 1352 goto fail3;
1359 1353
1354 rc = efx_probe_filters(efx);
1355 if (rc) {
1356 netif_err(efx, probe, efx->net_dev,
1357 "failed to create filter tables\n");
1358 goto fail4;
1359 }
1360
1360 return 0; 1361 return 0;
1361 1362
1363 fail4:
1364 efx_remove_channels(efx);
1362 fail3: 1365 fail3:
1363 efx_remove_port(efx); 1366 efx_remove_port(efx);
1364 fail2: 1367 fail2:
@@ -1489,6 +1492,7 @@ static void efx_stop_all(struct efx_nic *efx)
1489 1492
1490static void efx_remove_all(struct efx_nic *efx) 1493static void efx_remove_all(struct efx_nic *efx)
1491{ 1494{
1495 efx_remove_filters(efx);
1492 efx_remove_channels(efx); 1496 efx_remove_channels(efx);
1493 efx_remove_port(efx); 1497 efx_remove_port(efx);
1494 efx_remove_nic(efx); 1498 efx_remove_nic(efx);
@@ -1535,8 +1539,7 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1535 * 1539 *
1536 **************************************************************************/ 1540 **************************************************************************/
1537 1541
1538/* Run periodically off the general workqueue. Serialised against 1542/* Run periodically off the general workqueue */
1539 * efx_reconfigure_port via the mac_lock */
1540static void efx_monitor(struct work_struct *data) 1543static void efx_monitor(struct work_struct *data)
1541{ 1544{
1542 struct efx_nic *efx = container_of(data, struct efx_nic, 1545 struct efx_nic *efx = container_of(data, struct efx_nic,
@@ -1549,16 +1552,13 @@ static void efx_monitor(struct work_struct *data)
1549 1552
1550 /* If the mac_lock is already held then it is likely a port 1553 /* If the mac_lock is already held then it is likely a port
1551 * reconfiguration is already in place, which will likely do 1554 * reconfiguration is already in place, which will likely do
1552 * most of the work of check_hw() anyway. */ 1555 * most of the work of monitor() anyway. */
1553 if (!mutex_trylock(&efx->mac_lock)) 1556 if (mutex_trylock(&efx->mac_lock)) {
1554 goto out_requeue; 1557 if (efx->port_enabled)
1555 if (!efx->port_enabled) 1558 efx->type->monitor(efx);
1556 goto out_unlock; 1559 mutex_unlock(&efx->mac_lock);
1557 efx->type->monitor(efx); 1560 }
1558 1561
1559out_unlock:
1560 mutex_unlock(&efx->mac_lock);
1561out_requeue:
1562 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1562 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1563 efx_monitor_interval); 1563 efx_monitor_interval);
1564} 1564}
@@ -2002,6 +2002,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2002 efx->mac_op->reconfigure(efx); 2002 efx->mac_op->reconfigure(efx);
2003 2003
2004 efx_init_channels(efx); 2004 efx_init_channels(efx);
2005 efx_restore_filters(efx);
2005 2006
2006 mutex_unlock(&efx->spi_lock); 2007 mutex_unlock(&efx->spi_lock);
2007 mutex_unlock(&efx->mac_lock); 2008 mutex_unlock(&efx->mac_lock);
@@ -2171,10 +2172,8 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
2171 return 0; 2172 return 0;
2172} 2173}
2173void efx_port_dummy_op_void(struct efx_nic *efx) {} 2174void efx_port_dummy_op_void(struct efx_nic *efx) {}
2174void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) 2175
2175{ 2176static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2176}
2177bool efx_port_dummy_op_poll(struct efx_nic *efx)
2178{ 2177{
2179 return false; 2178 return false;
2180} 2179}
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index e783c0fedfd8..10a1bf40da96 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -12,6 +12,7 @@
12#define EFX_EFX_H 12#define EFX_EFX_H
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15#include "filter.h"
15 16
16/* PCI IDs */ 17/* PCI IDs */
17#define EFX_VENDID_SFC 0x1924 18#define EFX_VENDID_SFC 0x1924
@@ -64,6 +65,19 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
64 * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ 65 * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
65#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) 66#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
66 67
68/* Filters */
69extern int efx_probe_filters(struct efx_nic *efx);
70extern void efx_restore_filters(struct efx_nic *efx);
71extern void efx_remove_filters(struct efx_nic *efx);
72extern int efx_filter_insert_filter(struct efx_nic *efx,
73 struct efx_filter_spec *spec,
74 bool replace);
75extern int efx_filter_remove_filter(struct efx_nic *efx,
76 struct efx_filter_spec *spec);
77extern void efx_filter_table_clear(struct efx_nic *efx,
78 enum efx_filter_table_id table_id,
79 enum efx_filter_priority priority);
80
67/* Channels */ 81/* Channels */
68extern void efx_process_channel_now(struct efx_channel *channel); 82extern void efx_process_channel_now(struct efx_channel *channel);
69extern int 83extern int
@@ -74,10 +88,6 @@ extern int efx_reconfigure_port(struct efx_nic *efx);
74extern int __efx_reconfigure_port(struct efx_nic *efx); 88extern int __efx_reconfigure_port(struct efx_nic *efx);
75 89
76/* Ethtool support */ 90/* Ethtool support */
77extern int efx_ethtool_get_settings(struct net_device *net_dev,
78 struct ethtool_cmd *ecmd);
79extern int efx_ethtool_set_settings(struct net_device *net_dev,
80 struct ethtool_cmd *ecmd);
81extern const struct ethtool_ops efx_ethtool_ops; 91extern const struct ethtool_ops efx_ethtool_ops;
82 92
83/* Reset handling */ 93/* Reset handling */
@@ -93,9 +103,7 @@ extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
93/* Dummy PHY ops for PHY drivers */ 103/* Dummy PHY ops for PHY drivers */
94extern int efx_port_dummy_op_int(struct efx_nic *efx); 104extern int efx_port_dummy_op_int(struct efx_nic *efx);
95extern void efx_port_dummy_op_void(struct efx_nic *efx); 105extern void efx_port_dummy_op_void(struct efx_nic *efx);
96extern void 106
97efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
98extern bool efx_port_dummy_op_poll(struct efx_nic *efx);
99 107
100/* MTD */ 108/* MTD */
101#ifdef CONFIG_SFC_MTD 109#ifdef CONFIG_SFC_MTD
@@ -108,8 +116,6 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
108static inline void efx_mtd_remove(struct efx_nic *efx) {} 116static inline void efx_mtd_remove(struct efx_nic *efx) {}
109#endif 117#endif
110 118
111extern unsigned int efx_monitor_interval;
112
113static inline void efx_schedule_channel(struct efx_channel *channel) 119static inline void efx_schedule_channel(struct efx_channel *channel)
114{ 120{
115 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 121 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 7f735d804801..edb9d16b8b47 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -15,6 +15,7 @@
15#include "workarounds.h" 15#include "workarounds.h"
16#include "selftest.h" 16#include "selftest.h"
17#include "efx.h" 17#include "efx.h"
18#include "filter.h"
18#include "nic.h" 19#include "nic.h"
19#include "spi.h" 20#include "spi.h"
20#include "mdio_10g.h" 21#include "mdio_10g.h"
@@ -186,8 +187,8 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
186} 187}
187 188
188/* This must be called with rtnl_lock held. */ 189/* This must be called with rtnl_lock held. */
189int efx_ethtool_get_settings(struct net_device *net_dev, 190static int efx_ethtool_get_settings(struct net_device *net_dev,
190 struct ethtool_cmd *ecmd) 191 struct ethtool_cmd *ecmd)
191{ 192{
192 struct efx_nic *efx = netdev_priv(net_dev); 193 struct efx_nic *efx = netdev_priv(net_dev);
193 struct efx_link_state *link_state = &efx->link_state; 194 struct efx_link_state *link_state = &efx->link_state;
@@ -210,8 +211,8 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
210} 211}
211 212
212/* This must be called with rtnl_lock held. */ 213/* This must be called with rtnl_lock held. */
213int efx_ethtool_set_settings(struct net_device *net_dev, 214static int efx_ethtool_set_settings(struct net_device *net_dev,
214 struct ethtool_cmd *ecmd) 215 struct ethtool_cmd *ecmd)
215{ 216{
216 struct efx_nic *efx = netdev_priv(net_dev); 217 struct efx_nic *efx = netdev_priv(net_dev);
217 int rc; 218 int rc;
@@ -551,9 +552,22 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
551static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) 552static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
552{ 553{
553 struct efx_nic *efx = netdev_priv(net_dev); 554 struct efx_nic *efx = netdev_priv(net_dev);
554 u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH; 555 u32 supported = (efx->type->offload_features &
556 (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
557 int rc;
558
559 rc = ethtool_op_set_flags(net_dev, data, supported);
560 if (rc)
561 return rc;
562
563 if (!(data & ETH_FLAG_NTUPLE)) {
564 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
565 EFX_FILTER_PRI_MANUAL);
566 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
567 EFX_FILTER_PRI_MANUAL);
568 }
555 569
556 return ethtool_op_set_flags(net_dev, data, supported); 570 return 0;
557} 571}
558 572
559static void efx_ethtool_self_test(struct net_device *net_dev, 573static void efx_ethtool_self_test(struct net_device *net_dev,
@@ -877,7 +891,7 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
877 return efx->type->set_wol(efx, wol->wolopts); 891 return efx->type->set_wol(efx, wol->wolopts);
878} 892}
879 893
880extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) 894static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
881{ 895{
882 struct efx_nic *efx = netdev_priv(net_dev); 896 struct efx_nic *efx = netdev_priv(net_dev);
883 enum reset_type method; 897 enum reset_type method;
@@ -955,6 +969,105 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
955 } 969 }
956} 970}
957 971
972static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
973 struct ethtool_rx_ntuple *ntuple)
974{
975 struct efx_nic *efx = netdev_priv(net_dev);
976 struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
977 struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
978 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
979 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
980 struct efx_filter_spec filter;
981
982 /* Range-check action */
983 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
984 ntuple->fs.action >= (s32)efx->n_rx_channels)
985 return -EINVAL;
986
987 if (~ntuple->fs.data_mask)
988 return -EINVAL;
989
990 switch (ntuple->fs.flow_type) {
991 case TCP_V4_FLOW:
992 case UDP_V4_FLOW:
993 /* Must match all of destination, */
994 if (ip_mask->ip4dst | ip_mask->pdst)
995 return -EINVAL;
996 /* all or none of source, */
997 if ((ip_mask->ip4src | ip_mask->psrc) &&
998 ((__force u32)~ip_mask->ip4src |
999 (__force u16)~ip_mask->psrc))
1000 return -EINVAL;
1001 /* and nothing else */
1002 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
1003 return -EINVAL;
1004 break;
1005 case ETHER_FLOW:
1006 /* Must match all of destination, */
1007 if (!is_zero_ether_addr(mac_mask->h_dest))
1008 return -EINVAL;
1009 /* all or none of VID, */
1010 if (ntuple->fs.vlan_tag_mask != 0xf000 &&
1011 ntuple->fs.vlan_tag_mask != 0xffff)
1012 return -EINVAL;
1013 /* and nothing else */
1014 if (!is_broadcast_ether_addr(mac_mask->h_source) ||
1015 mac_mask->h_proto != htons(0xffff))
1016 return -EINVAL;
1017 break;
1018 default:
1019 return -EINVAL;
1020 }
1021
1022 filter.priority = EFX_FILTER_PRI_MANUAL;
1023 filter.flags = 0;
1024
1025 switch (ntuple->fs.flow_type) {
1026 case TCP_V4_FLOW:
1027 if (!ip_mask->ip4src)
1028 efx_filter_set_rx_tcp_full(&filter,
1029 htonl(ip_entry->ip4src),
1030 htons(ip_entry->psrc),
1031 htonl(ip_entry->ip4dst),
1032 htons(ip_entry->pdst));
1033 else
1034 efx_filter_set_rx_tcp_wild(&filter,
1035 htonl(ip_entry->ip4dst),
1036 htons(ip_entry->pdst));
1037 break;
1038 case UDP_V4_FLOW:
1039 if (!ip_mask->ip4src)
1040 efx_filter_set_rx_udp_full(&filter,
1041 htonl(ip_entry->ip4src),
1042 htons(ip_entry->psrc),
1043 htonl(ip_entry->ip4dst),
1044 htons(ip_entry->pdst));
1045 else
1046 efx_filter_set_rx_udp_wild(&filter,
1047 htonl(ip_entry->ip4dst),
1048 htons(ip_entry->pdst));
1049 break;
1050 case ETHER_FLOW:
1051 if (ntuple->fs.vlan_tag_mask == 0xf000)
1052 efx_filter_set_rx_mac_full(&filter,
1053 ntuple->fs.vlan_tag & 0xfff,
1054 mac_entry->h_dest);
1055 else
1056 efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
1057 break;
1058 }
1059
1060 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
1061 return efx_filter_remove_filter(efx, &filter);
1062 } else {
1063 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
1064 filter.dmaq_id = 0xfff;
1065 else
1066 filter.dmaq_id = ntuple->fs.action;
1067 return efx_filter_insert_filter(efx, &filter, true);
1068 }
1069}
1070
958static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, 1071static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
959 struct ethtool_rxfh_indir *indir) 1072 struct ethtool_rxfh_indir *indir)
960{ 1073{
@@ -1033,6 +1146,7 @@ const struct ethtool_ops efx_ethtool_ops = {
1033 .set_wol = efx_ethtool_set_wol, 1146 .set_wol = efx_ethtool_set_wol,
1034 .reset = efx_ethtool_reset, 1147 .reset = efx_ethtool_reset,
1035 .get_rxnfc = efx_ethtool_get_rxnfc, 1148 .get_rxnfc = efx_ethtool_get_rxnfc,
1149 .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
1036 .get_rxfh_indir = efx_ethtool_get_rxfh_indir, 1150 .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
1037 .set_rxfh_indir = efx_ethtool_set_rxfh_indir, 1151 .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
1038}; 1152};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index b4d8efe67772..267019bb2b15 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -446,30 +446,19 @@ static void falcon_reset_macs(struct efx_nic *efx)
446 /* It's not safe to use GLB_CTL_REG to reset the 446 /* It's not safe to use GLB_CTL_REG to reset the
447 * macs, so instead use the internal MAC resets 447 * macs, so instead use the internal MAC resets
448 */ 448 */
449 if (!EFX_IS10G(efx)) { 449 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
450 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1); 450 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
451 efx_writeo(efx, &reg, FR_AB_GM_CFG1); 451
452 udelay(1000); 452 for (count = 0; count < 10000; count++) {
453 453 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
454 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0); 454 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
455 efx_writeo(efx, &reg, FR_AB_GM_CFG1); 455 0)
456 udelay(1000); 456 return;
457 return; 457 udelay(10);
458 } else {
459 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
460 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
461
462 for (count = 0; count < 10000; count++) {
463 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
464 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
465 0)
466 return;
467 udelay(10);
468 }
469
470 netif_err(efx, hw, efx->net_dev,
471 "timed out waiting for XMAC core reset\n");
472 } 458 }
459
460 netif_err(efx, hw, efx->net_dev,
461 "timed out waiting for XMAC core reset\n");
473 } 462 }
474 463
475 /* Mac stats will fail whist the TX fifo is draining */ 464 /* Mac stats will fail whist the TX fifo is draining */
@@ -508,7 +497,6 @@ static void falcon_reset_macs(struct efx_nic *efx)
508 * are re-enabled by the caller */ 497 * are re-enabled by the caller */
509 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); 498 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
510 499
511 /* This can run even when the GMAC is selected */
512 falcon_setup_xaui(efx); 500 falcon_setup_xaui(efx);
513} 501}
514 502
@@ -646,8 +634,6 @@ static void falcon_stats_timer_func(unsigned long context)
646 spin_unlock(&efx->stats_lock); 634 spin_unlock(&efx->stats_lock);
647} 635}
648 636
649static void falcon_switch_mac(struct efx_nic *efx);
650
651static bool falcon_loopback_link_poll(struct efx_nic *efx) 637static bool falcon_loopback_link_poll(struct efx_nic *efx)
652{ 638{
653 struct efx_link_state old_state = efx->link_state; 639 struct efx_link_state old_state = efx->link_state;
@@ -658,11 +644,7 @@ static bool falcon_loopback_link_poll(struct efx_nic *efx)
658 efx->link_state.fd = true; 644 efx->link_state.fd = true;
659 efx->link_state.fc = efx->wanted_fc; 645 efx->link_state.fc = efx->wanted_fc;
660 efx->link_state.up = true; 646 efx->link_state.up = true;
661 647 efx->link_state.speed = 10000;
662 if (efx->loopback_mode == LOOPBACK_GMAC)
663 efx->link_state.speed = 1000;
664 else
665 efx->link_state.speed = 10000;
666 648
667 return !efx_link_state_equal(&efx->link_state, &old_state); 649 return !efx_link_state_equal(&efx->link_state, &old_state);
668} 650}
@@ -685,7 +667,7 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
685 falcon_stop_nic_stats(efx); 667 falcon_stop_nic_stats(efx);
686 falcon_deconfigure_mac_wrapper(efx); 668 falcon_deconfigure_mac_wrapper(efx);
687 669
688 falcon_switch_mac(efx); 670 falcon_reset_macs(efx);
689 671
690 efx->phy_op->reconfigure(efx); 672 efx->phy_op->reconfigure(efx);
691 rc = efx->mac_op->reconfigure(efx); 673 rc = efx->mac_op->reconfigure(efx);
@@ -835,73 +817,23 @@ out:
835 return rc; 817 return rc;
836} 818}
837 819
838static void falcon_clock_mac(struct efx_nic *efx)
839{
840 unsigned strap_val;
841 efx_oword_t nic_stat;
842
843 /* Configure the NIC generated MAC clock correctly */
844 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
845 strap_val = EFX_IS10G(efx) ? 5 : 3;
846 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
847 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
848 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
849 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
850 } else {
851 /* Falcon A1 does not support 1G/10G speed switching
852 * and must not be used with a PHY that does. */
853 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
854 strap_val);
855 }
856}
857
858static void falcon_switch_mac(struct efx_nic *efx)
859{
860 struct efx_mac_operations *old_mac_op = efx->mac_op;
861 struct falcon_nic_data *nic_data = efx->nic_data;
862 unsigned int stats_done_offset;
863
864 WARN_ON(!mutex_is_locked(&efx->mac_lock));
865 WARN_ON(nic_data->stats_disable_count == 0);
866
867 efx->mac_op = (EFX_IS10G(efx) ?
868 &falcon_xmac_operations : &falcon_gmac_operations);
869
870 if (EFX_IS10G(efx))
871 stats_done_offset = XgDmaDone_offset;
872 else
873 stats_done_offset = GDmaDone_offset;
874 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
875
876 if (old_mac_op == efx->mac_op)
877 return;
878
879 falcon_clock_mac(efx);
880
881 netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
882 EFX_IS10G(efx) ? 'X' : 'G');
883 /* Not all macs support a mac-level link state */
884 efx->xmac_poll_required = false;
885 falcon_reset_macs(efx);
886}
887
888/* This call is responsible for hooking in the MAC and PHY operations */ 820/* This call is responsible for hooking in the MAC and PHY operations */
889static int falcon_probe_port(struct efx_nic *efx) 821static int falcon_probe_port(struct efx_nic *efx)
890{ 822{
823 struct falcon_nic_data *nic_data = efx->nic_data;
891 int rc; 824 int rc;
892 825
893 switch (efx->phy_type) { 826 switch (efx->phy_type) {
894 case PHY_TYPE_SFX7101: 827 case PHY_TYPE_SFX7101:
895 efx->phy_op = &falcon_sfx7101_phy_ops; 828 efx->phy_op = &falcon_sfx7101_phy_ops;
896 break; 829 break;
897 case PHY_TYPE_SFT9001A:
898 case PHY_TYPE_SFT9001B:
899 efx->phy_op = &falcon_sft9001_phy_ops;
900 break;
901 case PHY_TYPE_QT2022C2: 830 case PHY_TYPE_QT2022C2:
902 case PHY_TYPE_QT2025C: 831 case PHY_TYPE_QT2025C:
903 efx->phy_op = &falcon_qt202x_phy_ops; 832 efx->phy_op = &falcon_qt202x_phy_ops;
904 break; 833 break;
834 case PHY_TYPE_TXC43128:
835 efx->phy_op = &falcon_txc_phy_ops;
836 break;
905 default: 837 default:
906 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", 838 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
907 efx->phy_type); 839 efx->phy_type);
@@ -937,6 +869,7 @@ static int falcon_probe_port(struct efx_nic *efx)
937 (u64)efx->stats_buffer.dma_addr, 869 (u64)efx->stats_buffer.dma_addr,
938 efx->stats_buffer.addr, 870 efx->stats_buffer.addr,
939 (u64)virt_to_phys(efx->stats_buffer.addr)); 871 (u64)virt_to_phys(efx->stats_buffer.addr));
872 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
940 873
941 return 0; 874 return 0;
942} 875}
@@ -1201,7 +1134,7 @@ static void falcon_monitor(struct efx_nic *efx)
1201 falcon_stop_nic_stats(efx); 1134 falcon_stop_nic_stats(efx);
1202 falcon_deconfigure_mac_wrapper(efx); 1135 falcon_deconfigure_mac_wrapper(efx);
1203 1136
1204 falcon_switch_mac(efx); 1137 falcon_reset_macs(efx);
1205 rc = efx->mac_op->reconfigure(efx); 1138 rc = efx->mac_op->reconfigure(efx);
1206 BUG_ON(rc); 1139 BUG_ON(rc);
1207 1140
@@ -1210,8 +1143,7 @@ static void falcon_monitor(struct efx_nic *efx)
1210 efx_link_status_changed(efx); 1143 efx_link_status_changed(efx);
1211 } 1144 }
1212 1145
1213 if (EFX_IS10G(efx)) 1146 falcon_poll_xmac(efx);
1214 falcon_poll_xmac(efx);
1215} 1147}
1216 1148
1217/* Zeroes out the SRAM contents. This routine must be called in 1149/* Zeroes out the SRAM contents. This routine must be called in
@@ -1604,16 +1536,6 @@ static int falcon_init_nic(struct efx_nic *efx)
1604 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); 1536 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
1605 efx_writeo(efx, &temp, FR_AB_NIC_STAT); 1537 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
1606 1538
1607 /* Set the source of the GMAC clock */
1608 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
1609 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
1610 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
1611 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
1612 }
1613
1614 /* Select the correct MAC */
1615 falcon_clock_mac(efx);
1616
1617 rc = falcon_reset_sram(efx); 1539 rc = falcon_reset_sram(efx);
1618 if (rc) 1540 if (rc)
1619 return rc; 1541 return rc;
@@ -1874,7 +1796,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1874 * channels */ 1796 * channels */
1875 .tx_dc_base = 0x130000, 1797 .tx_dc_base = 0x130000,
1876 .rx_dc_base = 0x100000, 1798 .rx_dc_base = 0x100000,
1877 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH, 1799 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
1878 .reset_world_flags = ETH_RESET_IRQ, 1800 .reset_world_flags = ETH_RESET_IRQ,
1879}; 1801};
1880 1802
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 3d950c2cf205..cfc6a5b5a477 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -26,7 +26,7 @@
26/* Board types */ 26/* Board types */
27#define FALCON_BOARD_SFE4001 0x01 27#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02 28#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51 29#define FALCON_BOARD_SFE4003 0x03
30#define FALCON_BOARD_SFN4112F 0x52 30#define FALCON_BOARD_SFN4112F 0x52
31 31
32/* Board temperature is about 15°C above ambient when air flow is 32/* Board temperature is about 15°C above ambient when air flow is
@@ -142,17 +142,17 @@ static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
142#endif /* CONFIG_SENSORS_LM87 */ 142#endif /* CONFIG_SENSORS_LM87 */
143 143
144/***************************************************************************** 144/*****************************************************************************
145 * Support for the SFE4001 and SFN4111T NICs. 145 * Support for the SFE4001 NIC.
146 * 146 *
147 * The SFE4001 does not power-up fully at reset due to its high power 147 * The SFE4001 does not power-up fully at reset due to its high power
148 * consumption. We control its power via a PCA9539 I/O expander. 148 * consumption. We control its power via a PCA9539 I/O expander.
149 * Both boards have a MAX6647 temperature monitor which we expose to 149 * It also has a MAX6647 temperature monitor which we expose to
150 * the lm90 driver. 150 * the lm90 driver.
151 * 151 *
152 * This also provides minimal support for reflashing the PHY, which is 152 * This also provides minimal support for reflashing the PHY, which is
153 * initiated by resetting it with the FLASH_CFG_1 pin pulled down. 153 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
154 * On SFE4001 rev A2 and later this is connected to the 3V3X output of 154 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
155 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3. 155 * the IO-expander.
156 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually 156 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
157 * exclusive with the network device being open. 157 * exclusive with the network device being open.
158 */ 158 */
@@ -304,34 +304,6 @@ fail_on:
304 return rc; 304 return rc;
305} 305}
306 306
307static int sfn4111t_reset(struct efx_nic *efx)
308{
309 struct falcon_board *board = falcon_board(efx);
310 efx_oword_t reg;
311
312 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
313 i2c_lock_adapter(&board->i2c_adap);
314
315 /* Pull RST_N (GPIO 2) low then let it up again, setting the
316 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
317 * output enables; the output levels should always be 0 (low)
318 * and we rely on external pull-ups. */
319 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
320 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
321 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
322 msleep(1000);
323 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
324 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
325 !!(efx->phy_mode & PHY_MODE_SPECIAL));
326 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
327 msleep(1);
328
329 i2c_unlock_adapter(&board->i2c_adap);
330
331 ssleep(1);
332 return 0;
333}
334
335static ssize_t show_phy_flash_cfg(struct device *dev, 307static ssize_t show_phy_flash_cfg(struct device *dev,
336 struct device_attribute *attr, char *buf) 308 struct device_attribute *attr, char *buf)
337{ 309{
@@ -363,10 +335,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
363 efx->phy_mode = new_mode; 335 efx->phy_mode = new_mode;
364 if (new_mode & PHY_MODE_SPECIAL) 336 if (new_mode & PHY_MODE_SPECIAL)
365 falcon_stop_nic_stats(efx); 337 falcon_stop_nic_stats(efx);
366 if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001) 338 err = sfe4001_poweron(efx);
367 err = sfe4001_poweron(efx);
368 else
369 err = sfn4111t_reset(efx);
370 if (!err) 339 if (!err)
371 err = efx_reconfigure_port(efx); 340 err = efx_reconfigure_port(efx);
372 if (!(new_mode & PHY_MODE_SPECIAL)) 341 if (!(new_mode & PHY_MODE_SPECIAL))
@@ -479,83 +448,6 @@ fail_hwmon:
479 return rc; 448 return rc;
480} 449}
481 450
482static int sfn4111t_check_hw(struct efx_nic *efx)
483{
484 s32 status;
485
486 /* If XAUI link is up then do not monitor */
487 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
488 return 0;
489
490 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
491 status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
492 MAX664X_REG_RSL);
493 if (status < 0)
494 return -EIO;
495 if (status & 0x57)
496 return -ERANGE;
497 return 0;
498}
499
500static void sfn4111t_fini(struct efx_nic *efx)
501{
502 netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
503
504 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
505 i2c_unregister_device(falcon_board(efx)->hwmon_client);
506}
507
508static struct i2c_board_info sfn4111t_a0_hwmon_info = {
509 I2C_BOARD_INFO("max6647", 0x4e),
510};
511
512static struct i2c_board_info sfn4111t_r5_hwmon_info = {
513 I2C_BOARD_INFO("max6646", 0x4d),
514};
515
516static void sfn4111t_init_phy(struct efx_nic *efx)
517{
518 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
519 if (sft9001_wait_boot(efx) != -EINVAL)
520 return;
521
522 efx->phy_mode = PHY_MODE_SPECIAL;
523 falcon_stop_nic_stats(efx);
524 }
525
526 sfn4111t_reset(efx);
527 sft9001_wait_boot(efx);
528}
529
530static int sfn4111t_init(struct efx_nic *efx)
531{
532 struct falcon_board *board = falcon_board(efx);
533 int rc;
534
535 board->hwmon_client =
536 i2c_new_device(&board->i2c_adap,
537 (board->minor < 5) ?
538 &sfn4111t_a0_hwmon_info :
539 &sfn4111t_r5_hwmon_info);
540 if (!board->hwmon_client)
541 return -EIO;
542
543 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
544 if (rc)
545 goto fail_hwmon;
546
547 if (efx->phy_mode & PHY_MODE_SPECIAL)
548 /* PHY may not generate a 156.25 MHz clock and MAC
549 * stats fetch will fail. */
550 falcon_stop_nic_stats(efx);
551
552 return 0;
553
554fail_hwmon:
555 i2c_unregister_device(board->hwmon_client);
556 return rc;
557}
558
559/***************************************************************************** 451/*****************************************************************************
560 * Support for the SFE4002 452 * Support for the SFE4002
561 * 453 *
@@ -691,6 +583,75 @@ static int sfn4112f_init(struct efx_nic *efx)
691 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); 583 return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
692} 584}
693 585
586/*****************************************************************************
587 * Support for the SFE4003
588 *
589 */
590static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */
591
592static const u8 sfe4003_lm87_regs[] = {
593 LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */
594 LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
595 LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
596 LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
597 LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
598 LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS),
599 0
600};
601
602static struct i2c_board_info sfe4003_hwmon_info = {
603 I2C_BOARD_INFO("lm87", 0x2e),
604 .platform_data = &sfe4003_lm87_channel,
605};
606
607/* Board-specific LED info. */
608#define SFE4003_RED_LED_GPIO 11
609#define SFE4003_LED_ON 1
610#define SFE4003_LED_OFF 0
611
612static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
613{
614 struct falcon_board *board = falcon_board(efx);
615
616 /* The LEDs were not wired to GPIOs before A3 */
617 if (board->minor < 3 && board->major == 0)
618 return;
619
620 falcon_txc_set_gpio_val(
621 efx, SFE4003_RED_LED_GPIO,
622 (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF);
623}
624
625static void sfe4003_init_phy(struct efx_nic *efx)
626{
627 struct falcon_board *board = falcon_board(efx);
628
629 /* The LEDs were not wired to GPIOs before A3 */
630 if (board->minor < 3 && board->major == 0)
631 return;
632
633 falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT);
634 falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF);
635}
636
637static int sfe4003_check_hw(struct efx_nic *efx)
638{
639 struct falcon_board *board = falcon_board(efx);
640
641 /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time
642 * (bad sensor) so we mask it out. */
643 unsigned alarm_mask =
644 (board->major == 0 && board->minor <= 2) ?
645 ~LM87_ALARM_TEMP_EXT1 : ~0;
646
647 return efx_check_lm87(efx, alarm_mask);
648}
649
650static int sfe4003_init(struct efx_nic *efx)
651{
652 return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs);
653}
654
694static const struct falcon_board_type board_types[] = { 655static const struct falcon_board_type board_types[] = {
695 { 656 {
696 .id = FALCON_BOARD_SFE4001, 657 .id = FALCON_BOARD_SFE4001,
@@ -713,14 +674,14 @@ static const struct falcon_board_type board_types[] = {
713 .monitor = sfe4002_check_hw, 674 .monitor = sfe4002_check_hw,
714 }, 675 },
715 { 676 {
716 .id = FALCON_BOARD_SFN4111T, 677 .id = FALCON_BOARD_SFE4003,
717 .ref_model = "SFN4111T", 678 .ref_model = "SFE4003",
718 .gen_type = "100/1000/10GBASE-T adapter", 679 .gen_type = "10GBASE-CX4 adapter",
719 .init = sfn4111t_init, 680 .init = sfe4003_init,
720 .init_phy = sfn4111t_init_phy, 681 .init_phy = sfe4003_init_phy,
721 .fini = sfn4111t_fini, 682 .fini = efx_fini_lm87,
722 .set_id_led = tenxpress_set_id_led, 683 .set_id_led = sfe4003_set_id_led,
723 .monitor = sfn4111t_check_hw, 684 .monitor = sfe4003_check_hw,
724 }, 685 },
725 { 686 {
726 .id = FALCON_BOARD_SFN4112F, 687 .id = FALCON_BOARD_SFN4112F,
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
deleted file mode 100644
index 7dadfcbd6ce7..000000000000
--- a/drivers/net/sfc/falcon_gmac.c
+++ /dev/null
@@ -1,230 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "nic.h"
15#include "mac.h"
16#include "regs.h"
17#include "io.h"
18
19/**************************************************************************
20 *
21 * MAC operations
22 *
23 *************************************************************************/
24
25static int falcon_reconfigure_gmac(struct efx_nic *efx)
26{
27 struct efx_link_state *link_state = &efx->link_state;
28 bool loopback, tx_fc, rx_fc, bytemode;
29 int if_mode;
30 unsigned int max_frame_len;
31 efx_oword_t reg;
32
33 /* Configuration register 1 */
34 tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
35 rx_fc = !!(link_state->fc & EFX_FC_RX);
36 loopback = (efx->loopback_mode == LOOPBACK_GMAC);
37 bytemode = (link_state->speed == 1000);
38
39 EFX_POPULATE_OWORD_5(reg,
40 FRF_AB_GM_LOOP, loopback,
41 FRF_AB_GM_TX_EN, 1,
42 FRF_AB_GM_TX_FC_EN, tx_fc,
43 FRF_AB_GM_RX_EN, 1,
44 FRF_AB_GM_RX_FC_EN, rx_fc);
45 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10);
47
48 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg,
51 FRF_AB_GM_IF_MODE, if_mode,
52 FRF_AB_GM_PAD_CRC_EN, 1,
53 FRF_AB_GM_LEN_CHK, 1,
54 FRF_AB_GM_FD, link_state->fd,
55 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56
57 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10);
59
60 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10);
65
66 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg,
68 FRF_AB_GMF_FTFENREQ, 1,
69 FRF_AB_GMF_STFENREQ, 1,
70 FRF_AB_GMF_FRFENREQ, 1,
71 FRF_AB_GMF_SRFENREQ, 1,
72 FRF_AB_GMF_WTMENREQ, 1);
73 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10);
75
76 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg,
78 FRF_AB_GMF_CFGFRTH, 0x12,
79 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10);
82
83 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg,
85 FRF_AB_GMF_CFGHWM, 0x3f,
86 FRF_AB_GMF_CFGLWM, 0xa);
87 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10);
89
90 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg,
92 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 FRF_AB_GMF_CFGFTTH, 0x08);
94 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10);
96
97 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10);
101
102 /* FIFO configuration register 5 */
103 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
107 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10);
110
111 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg,
113 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10);
119 EFX_POPULATE_OWORD_2(reg,
120 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10);
124
125 falcon_reconfigure_mac_wrapper(efx);
126
127 return 0;
128}
129
130static void falcon_update_stats_gmac(struct efx_nic *efx)
131{
132 struct efx_mac_stats *mac_stats = &efx->mac_stats;
133 unsigned long old_rx_pause, old_tx_pause;
134 unsigned long new_rx_pause, new_tx_pause;
135
136 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
137 old_rx_pause = mac_stats->rx_pause;
138 old_tx_pause = mac_stats->tx_pause;
139
140 /* Update MAC stats from DMAed values */
141 FALCON_STAT(efx, GRxGoodOct, rx_good_bytes);
142 FALCON_STAT(efx, GRxBadOct, rx_bad_bytes);
143 FALCON_STAT(efx, GRxMissPkt, rx_missed);
144 FALCON_STAT(efx, GRxFalseCRS, rx_false_carrier);
145 FALCON_STAT(efx, GRxPausePkt, rx_pause);
146 FALCON_STAT(efx, GRxBadPkt, rx_bad);
147 FALCON_STAT(efx, GRxUcastPkt, rx_unicast);
148 FALCON_STAT(efx, GRxMcastPkt, rx_multicast);
149 FALCON_STAT(efx, GRxBcastPkt, rx_broadcast);
150 FALCON_STAT(efx, GRxGoodLt64Pkt, rx_good_lt64);
151 FALCON_STAT(efx, GRxBadLt64Pkt, rx_bad_lt64);
152 FALCON_STAT(efx, GRx64Pkt, rx_64);
153 FALCON_STAT(efx, GRx65to127Pkt, rx_65_to_127);
154 FALCON_STAT(efx, GRx128to255Pkt, rx_128_to_255);
155 FALCON_STAT(efx, GRx256to511Pkt, rx_256_to_511);
156 FALCON_STAT(efx, GRx512to1023Pkt, rx_512_to_1023);
157 FALCON_STAT(efx, GRx1024to15xxPkt, rx_1024_to_15xx);
158 FALCON_STAT(efx, GRx15xxtoJumboPkt, rx_15xx_to_jumbo);
159 FALCON_STAT(efx, GRxGtJumboPkt, rx_gtjumbo);
160 FALCON_STAT(efx, GRxFcsErr64to15xxPkt, rx_bad_64_to_15xx);
161 FALCON_STAT(efx, GRxFcsErr15xxtoJumboPkt, rx_bad_15xx_to_jumbo);
162 FALCON_STAT(efx, GRxFcsErrGtJumboPkt, rx_bad_gtjumbo);
163 FALCON_STAT(efx, GTxGoodBadOct, tx_bytes);
164 FALCON_STAT(efx, GTxGoodOct, tx_good_bytes);
165 FALCON_STAT(efx, GTxSglColPkt, tx_single_collision);
166 FALCON_STAT(efx, GTxMultColPkt, tx_multiple_collision);
167 FALCON_STAT(efx, GTxExColPkt, tx_excessive_collision);
168 FALCON_STAT(efx, GTxDefPkt, tx_deferred);
169 FALCON_STAT(efx, GTxLateCol, tx_late_collision);
170 FALCON_STAT(efx, GTxExDefPkt, tx_excessive_deferred);
171 FALCON_STAT(efx, GTxPausePkt, tx_pause);
172 FALCON_STAT(efx, GTxBadPkt, tx_bad);
173 FALCON_STAT(efx, GTxUcastPkt, tx_unicast);
174 FALCON_STAT(efx, GTxMcastPkt, tx_multicast);
175 FALCON_STAT(efx, GTxBcastPkt, tx_broadcast);
176 FALCON_STAT(efx, GTxLt64Pkt, tx_lt64);
177 FALCON_STAT(efx, GTx64Pkt, tx_64);
178 FALCON_STAT(efx, GTx65to127Pkt, tx_65_to_127);
179 FALCON_STAT(efx, GTx128to255Pkt, tx_128_to_255);
180 FALCON_STAT(efx, GTx256to511Pkt, tx_256_to_511);
181 FALCON_STAT(efx, GTx512to1023Pkt, tx_512_to_1023);
182 FALCON_STAT(efx, GTx1024to15xxPkt, tx_1024_to_15xx);
183 FALCON_STAT(efx, GTx15xxtoJumboPkt, tx_15xx_to_jumbo);
184 FALCON_STAT(efx, GTxGtJumboPkt, tx_gtjumbo);
185 FALCON_STAT(efx, GTxNonTcpUdpPkt, tx_non_tcpudp);
186 FALCON_STAT(efx, GTxMacSrcErrPkt, tx_mac_src_error);
187 FALCON_STAT(efx, GTxIpSrcErrPkt, tx_ip_src_error);
188
189 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
190 new_rx_pause = mac_stats->rx_pause;
191 new_tx_pause = mac_stats->tx_pause;
192 mac_stats->rx_bad -= (new_rx_pause - old_rx_pause);
193 mac_stats->tx_bad -= (new_tx_pause - old_tx_pause);
194
195 /* Derive stats that the MAC doesn't provide directly */
196 mac_stats->tx_bad_bytes =
197 mac_stats->tx_bytes - mac_stats->tx_good_bytes;
198 mac_stats->tx_packets =
199 mac_stats->tx_lt64 + mac_stats->tx_64 +
200 mac_stats->tx_65_to_127 + mac_stats->tx_128_to_255 +
201 mac_stats->tx_256_to_511 + mac_stats->tx_512_to_1023 +
202 mac_stats->tx_1024_to_15xx + mac_stats->tx_15xx_to_jumbo +
203 mac_stats->tx_gtjumbo;
204 mac_stats->tx_collision =
205 mac_stats->tx_single_collision +
206 mac_stats->tx_multiple_collision +
207 mac_stats->tx_excessive_collision +
208 mac_stats->tx_late_collision;
209 mac_stats->rx_bytes =
210 mac_stats->rx_good_bytes + mac_stats->rx_bad_bytes;
211 mac_stats->rx_packets =
212 mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64 +
213 mac_stats->rx_64 + mac_stats->rx_65_to_127 +
214 mac_stats->rx_128_to_255 + mac_stats->rx_256_to_511 +
215 mac_stats->rx_512_to_1023 + mac_stats->rx_1024_to_15xx +
216 mac_stats->rx_15xx_to_jumbo + mac_stats->rx_gtjumbo;
217 mac_stats->rx_good = mac_stats->rx_packets - mac_stats->rx_bad;
218 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
219}
220
221static bool falcon_gmac_check_fault(struct efx_nic *efx)
222{
223 return false;
224}
225
226struct efx_mac_operations falcon_gmac_operations = {
227 .reconfigure = falcon_reconfigure_gmac,
228 .update_stats = falcon_update_stats_gmac,
229 .check_fault = falcon_gmac_check_fault,
230};
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bae656dd2c4e..b31f595ebb5b 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -143,7 +143,7 @@ static bool falcon_xmac_link_ok(struct efx_nic *efx)
143 efx_mdio_phyxgxs_lane_sync(efx)); 143 efx_mdio_phyxgxs_lane_sync(efx));
144} 144}
145 145
146void falcon_reconfigure_xmac_core(struct efx_nic *efx) 146static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
147{ 147{
148 unsigned int max_frame_len; 148 unsigned int max_frame_len;
149 efx_oword_t reg; 149 efx_oword_t reg;
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
new file mode 100644
index 000000000000..52cb6082b910
--- /dev/null
+++ b/drivers/net/sfc/filter.c
@@ -0,0 +1,454 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "efx.h"
11#include "filter.h"
12#include "io.h"
13#include "nic.h"
14#include "regs.h"
15
16/* "Fudge factors" - difference between programmed value and actual depth.
17 * Due to pipelined implementation we need to program H/W with a value that
18 * is larger than the hop limit we want.
19 */
20#define FILTER_CTL_SRCH_FUDGE_WILD 3
21#define FILTER_CTL_SRCH_FUDGE_FULL 1
22
23/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
24 * We also need to avoid infinite loops in efx_filter_search() when the
25 * table is full.
26 */
27#define FILTER_CTL_SRCH_MAX 200
28
29struct efx_filter_table {
30 u32 offset; /* address of table relative to BAR */
31 unsigned size; /* number of entries */
32 unsigned step; /* step between entries */
33 unsigned used; /* number currently used */
34 unsigned long *used_bitmap;
35 struct efx_filter_spec *spec;
36};
37
38struct efx_filter_state {
39 spinlock_t lock;
40 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
41 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
42};
43
44/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
45 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
46static u16 efx_filter_hash(u32 key)
47{
48 u16 tmp;
49
50 /* First 16 rounds */
51 tmp = 0x1fff ^ key >> 16;
52 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
53 tmp = tmp ^ tmp >> 9;
54 /* Last 16 rounds */
55 tmp = tmp ^ tmp << 13 ^ key;
56 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
57 return tmp ^ tmp >> 9;
58}
59
60/* To allow for hash collisions, filter search continues at these
61 * increments from the first possible entry selected by the hash. */
62static u16 efx_filter_increment(u32 key)
63{
64 return key * 2 - 1;
65}
66
67static enum efx_filter_table_id
68efx_filter_type_table_id(enum efx_filter_type type)
69{
70 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
71 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
72 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
73 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
74 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
75 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
76 return type >> 2;
77}
78
79static void
80efx_filter_table_reset_search_depth(struct efx_filter_state *state,
81 enum efx_filter_table_id table_id)
82{
83 memset(state->search_depth + (table_id << 2), 0,
84 sizeof(state->search_depth[0]) << 2);
85}
86
87static void efx_filter_push_rx_limits(struct efx_nic *efx)
88{
89 struct efx_filter_state *state = efx->filter_state;
90 efx_oword_t filter_ctl;
91
92 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
93
94 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
95 state->search_depth[EFX_FILTER_RX_TCP_FULL] +
96 FILTER_CTL_SRCH_FUDGE_FULL);
97 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
98 state->search_depth[EFX_FILTER_RX_TCP_WILD] +
99 FILTER_CTL_SRCH_FUDGE_WILD);
100 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
101 state->search_depth[EFX_FILTER_RX_UDP_FULL] +
102 FILTER_CTL_SRCH_FUDGE_FULL);
103 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
104 state->search_depth[EFX_FILTER_RX_UDP_WILD] +
105 FILTER_CTL_SRCH_FUDGE_WILD);
106
107 if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
108 EFX_SET_OWORD_FIELD(
109 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
110 state->search_depth[EFX_FILTER_RX_MAC_FULL] +
111 FILTER_CTL_SRCH_FUDGE_FULL);
112 EFX_SET_OWORD_FIELD(
113 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
114 state->search_depth[EFX_FILTER_RX_MAC_WILD] +
115 FILTER_CTL_SRCH_FUDGE_WILD);
116 }
117
118 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
119}
120
121/* Build a filter entry and return its n-tuple key. */
122static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
123{
124 u32 data3;
125
126 switch (efx_filter_type_table_id(spec->type)) {
127 case EFX_FILTER_TABLE_RX_IP: {
128 bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
129 spec->type == EFX_FILTER_RX_UDP_WILD);
130 EFX_POPULATE_OWORD_7(
131 *filter,
132 FRF_BZ_RSS_EN,
133 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
134 FRF_BZ_SCATTER_EN,
135 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
136 FRF_BZ_TCP_UDP, is_udp,
137 FRF_BZ_RXQ_ID, spec->dmaq_id,
138 EFX_DWORD_2, spec->data[2],
139 EFX_DWORD_1, spec->data[1],
140 EFX_DWORD_0, spec->data[0]);
141 data3 = is_udp;
142 break;
143 }
144
145 case EFX_FILTER_TABLE_RX_MAC: {
146 bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
147 EFX_POPULATE_OWORD_8(
148 *filter,
149 FRF_CZ_RMFT_RSS_EN,
150 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
151 FRF_CZ_RMFT_SCATTER_EN,
152 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
153 FRF_CZ_RMFT_IP_OVERRIDE,
154 !!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
155 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
156 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
157 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
158 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
159 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
160 data3 = is_wild;
161 break;
162 }
163
164 default:
165 BUG();
166 }
167
168 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
169}
170
171static bool efx_filter_equal(const struct efx_filter_spec *left,
172 const struct efx_filter_spec *right)
173{
174 if (left->type != right->type ||
175 memcmp(left->data, right->data, sizeof(left->data)))
176 return false;
177
178 return true;
179}
180
181static int efx_filter_search(struct efx_filter_table *table,
182 struct efx_filter_spec *spec, u32 key,
183 bool for_insert, int *depth_required)
184{
185 unsigned hash, incr, filter_idx, depth;
186 struct efx_filter_spec *cmp;
187
188 hash = efx_filter_hash(key);
189 incr = efx_filter_increment(key);
190
191 for (depth = 1, filter_idx = hash & (table->size - 1);
192 depth <= FILTER_CTL_SRCH_MAX &&
193 test_bit(filter_idx, table->used_bitmap);
194 ++depth) {
195 cmp = &table->spec[filter_idx];
196 if (efx_filter_equal(spec, cmp))
197 goto found;
198 filter_idx = (filter_idx + incr) & (table->size - 1);
199 }
200 if (!for_insert)
201 return -ENOENT;
202 if (depth > FILTER_CTL_SRCH_MAX)
203 return -EBUSY;
204found:
205 *depth_required = depth;
206 return filter_idx;
207}
208
209/**
210 * efx_filter_insert_filter - add or replace a filter
211 * @efx: NIC in which to insert the filter
212 * @spec: Specification for the filter
213 * @replace: Flag for whether the specified filter may replace a filter
214 * with an identical match expression and equal or lower priority
215 *
216 * On success, return the filter index within its table.
217 * On failure, return a negative error code.
218 */
219int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
220 bool replace)
221{
222 struct efx_filter_state *state = efx->filter_state;
223 enum efx_filter_table_id table_id =
224 efx_filter_type_table_id(spec->type);
225 struct efx_filter_table *table = &state->table[table_id];
226 struct efx_filter_spec *saved_spec;
227 efx_oword_t filter;
228 int filter_idx, depth;
229 u32 key;
230 int rc;
231
232 if (table->size == 0)
233 return -EINVAL;
234
235 key = efx_filter_build(&filter, spec);
236
237 netif_vdbg(efx, hw, efx->net_dev,
238 "%s: type %d search_depth=%d", __func__, spec->type,
239 state->search_depth[spec->type]);
240
241 spin_lock_bh(&state->lock);
242
243 rc = efx_filter_search(table, spec, key, true, &depth);
244 if (rc < 0)
245 goto out;
246 filter_idx = rc;
247 BUG_ON(filter_idx >= table->size);
248 saved_spec = &table->spec[filter_idx];
249
250 if (test_bit(filter_idx, table->used_bitmap)) {
251 /* Should we replace the existing filter? */
252 if (!replace) {
253 rc = -EEXIST;
254 goto out;
255 }
256 if (spec->priority < saved_spec->priority) {
257 rc = -EPERM;
258 goto out;
259 }
260 } else {
261 __set_bit(filter_idx, table->used_bitmap);
262 ++table->used;
263 }
264 *saved_spec = *spec;
265
266 if (state->search_depth[spec->type] < depth) {
267 state->search_depth[spec->type] = depth;
268 efx_filter_push_rx_limits(efx);
269 }
270
271 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
272
273 netif_vdbg(efx, hw, efx->net_dev,
274 "%s: filter type %d index %d rxq %u set",
275 __func__, spec->type, filter_idx, spec->dmaq_id);
276
277out:
278 spin_unlock_bh(&state->lock);
279 return rc;
280}
281
282static void efx_filter_table_clear_entry(struct efx_nic *efx,
283 struct efx_filter_table *table,
284 int filter_idx)
285{
286 static efx_oword_t filter;
287
288 if (test_bit(filter_idx, table->used_bitmap)) {
289 __clear_bit(filter_idx, table->used_bitmap);
290 --table->used;
291 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
292
293 efx_writeo(efx, &filter,
294 table->offset + table->step * filter_idx);
295 }
296}
297
298/**
299 * efx_filter_remove_filter - remove a filter by specification
300 * @efx: NIC from which to remove the filter
301 * @spec: Specification for the filter
302 *
303 * On success, return zero.
304 * On failure, return a negative error code.
305 */
306int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
307{
308 struct efx_filter_state *state = efx->filter_state;
309 enum efx_filter_table_id table_id =
310 efx_filter_type_table_id(spec->type);
311 struct efx_filter_table *table = &state->table[table_id];
312 struct efx_filter_spec *saved_spec;
313 efx_oword_t filter;
314 int filter_idx, depth;
315 u32 key;
316 int rc;
317
318 key = efx_filter_build(&filter, spec);
319
320 spin_lock_bh(&state->lock);
321
322 rc = efx_filter_search(table, spec, key, false, &depth);
323 if (rc < 0)
324 goto out;
325 filter_idx = rc;
326 saved_spec = &table->spec[filter_idx];
327
328 if (spec->priority < saved_spec->priority) {
329 rc = -EPERM;
330 goto out;
331 }
332
333 efx_filter_table_clear_entry(efx, table, filter_idx);
334 if (table->used == 0)
335 efx_filter_table_reset_search_depth(state, table_id);
336 rc = 0;
337
338out:
339 spin_unlock_bh(&state->lock);
340 return rc;
341}
342
343/**
344 * efx_filter_table_clear - remove filters from a table by priority
345 * @efx: NIC from which to remove the filters
346 * @table_id: Table from which to remove the filters
347 * @priority: Maximum priority to remove
348 */
349void efx_filter_table_clear(struct efx_nic *efx,
350 enum efx_filter_table_id table_id,
351 enum efx_filter_priority priority)
352{
353 struct efx_filter_state *state = efx->filter_state;
354 struct efx_filter_table *table = &state->table[table_id];
355 int filter_idx;
356
357 spin_lock_bh(&state->lock);
358
359 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
360 if (table->spec[filter_idx].priority <= priority)
361 efx_filter_table_clear_entry(efx, table, filter_idx);
362 if (table->used == 0)
363 efx_filter_table_reset_search_depth(state, table_id);
364
365 spin_unlock_bh(&state->lock);
366}
367
368/* Restore filter stater after reset */
369void efx_restore_filters(struct efx_nic *efx)
370{
371 struct efx_filter_state *state = efx->filter_state;
372 enum efx_filter_table_id table_id;
373 struct efx_filter_table *table;
374 efx_oword_t filter;
375 int filter_idx;
376
377 spin_lock_bh(&state->lock);
378
379 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
380 table = &state->table[table_id];
381 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
382 if (!test_bit(filter_idx, table->used_bitmap))
383 continue;
384 efx_filter_build(&filter, &table->spec[filter_idx]);
385 efx_writeo(efx, &filter,
386 table->offset + table->step * filter_idx);
387 }
388 }
389
390 efx_filter_push_rx_limits(efx);
391
392 spin_unlock_bh(&state->lock);
393}
394
395int efx_probe_filters(struct efx_nic *efx)
396{
397 struct efx_filter_state *state;
398 struct efx_filter_table *table;
399 unsigned table_id;
400
401 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
402 if (!state)
403 return -ENOMEM;
404 efx->filter_state = state;
405
406 spin_lock_init(&state->lock);
407
408 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
409 table = &state->table[EFX_FILTER_TABLE_RX_IP];
410 table->offset = FR_BZ_RX_FILTER_TBL0;
411 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
412 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
413 }
414
415 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
416 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
417 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
418 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
419 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
420 }
421
422 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
423 table = &state->table[table_id];
424 if (table->size == 0)
425 continue;
426 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
427 sizeof(unsigned long),
428 GFP_KERNEL);
429 if (!table->used_bitmap)
430 goto fail;
431 table->spec = vmalloc(table->size * sizeof(*table->spec));
432 if (!table->spec)
433 goto fail;
434 memset(table->spec, 0, table->size * sizeof(*table->spec));
435 }
436
437 return 0;
438
439fail:
440 efx_remove_filters(efx);
441 return -ENOMEM;
442}
443
444void efx_remove_filters(struct efx_nic *efx)
445{
446 struct efx_filter_state *state = efx->filter_state;
447 enum efx_filter_table_id table_id;
448
449 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
450 kfree(state->table[table_id].used_bitmap);
451 vfree(state->table[table_id].spec);
452 }
453 kfree(state);
454}
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
new file mode 100644
index 000000000000..a53319ded79c
--- /dev/null
+++ b/drivers/net/sfc/filter.h
@@ -0,0 +1,189 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_FILTER_H
11#define EFX_FILTER_H
12
13#include <linux/types.h>
14
/* Identifiers for the hardware RX filter tables; also used to index
 * the driver's per-table software state.
 */
15enum efx_filter_table_id {
16 EFX_FILTER_TABLE_RX_IP = 0,
17 EFX_FILTER_TABLE_RX_MAC,
18 EFX_FILTER_TABLE_COUNT,
19};
20
21/**
22 * enum efx_filter_type - type of hardware filter
23 * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
24 * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
25 * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
26 * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
27 * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
28 * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
29 *
30 * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
31 */
32enum efx_filter_type {
33 EFX_FILTER_RX_TCP_FULL = 0,
34 EFX_FILTER_RX_TCP_WILD,
35 EFX_FILTER_RX_UDP_FULL,
36 EFX_FILTER_RX_UDP_WILD,
 /* NOTE(review): gap before the MAC types looks deliberate —
  * presumably these values match a hardware/firmware encoding;
  * confirm before renumbering.
  */
37 EFX_FILTER_RX_MAC_FULL = 4,
38 EFX_FILTER_RX_MAC_WILD,
39 EFX_FILTER_TYPE_COUNT,
40};
41
42/**
43 * enum efx_filter_priority - priority of a hardware filter specification
44 * @EFX_FILTER_PRI_HINT: Performance hint
45 * @EFX_FILTER_PRI_MANUAL: Manually configured filter
46 * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour
47 */
48enum efx_filter_priority {
49 EFX_FILTER_PRI_HINT = 0,
50 EFX_FILTER_PRI_MANUAL,
51 EFX_FILTER_PRI_REQUIRED,
52};
53
54/**
55 * enum efx_filter_flags - flags for hardware filter specifications
56 * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
57 * By default, matching packets will be delivered only to the
58 * specified queue. If this flag is set, they will be delivered
59 * to a range of queues offset from the specified queue number
60 * according to the indirection table.
61 * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
62 * queue.
63 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
64 * any IP filter that matches the same packet. By default, IP
65 * filters take precedence.
66 *
67 * Currently, no flags are defined for TX filters.
68 */
69enum efx_filter_flags {
70 EFX_FILTER_FLAG_RX_RSS = 0x01,
71 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
72 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
73};
74
75/**
76 * struct efx_filter_spec - specification for a hardware filter
77 * @type: Type of match to be performed, from &enum efx_filter_type
78 * @priority: Priority of the filter, from &enum efx_filter_priority
79 * @flags: Miscellaneous flags, from &enum efx_filter_flags
80 * @dmaq_id: Source/target queue index
81 * @data: Match data (type-dependent)
82 *
83 * Use the efx_filter_set_*() functions to initialise the @type and
84 * @data fields.
85 */
86struct efx_filter_spec {
87 u8 type:4; /* enum efx_filter_type */
88 u8 priority:4; /* enum efx_filter_priority */
89 u8 flags; /* enum efx_filter_flags */
90 u16 dmaq_id;
91 u32 data[3]; /* 96-bit match value, layout depends on type */
92};
93
94/**
95 * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
96 * @spec: Specification to initialise
97 * @shost: Source host address (host byte order)
98 * @sport: Source port (host byte order)
99 * @dhost: Destination host address (host byte order)
100 * @dport: Destination port (host byte order)
101 */
102static inline void
103efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
104 u32 shost, u16 sport, u32 dhost, u16 dport)
105{
106 spec->type = EFX_FILTER_RX_TCP_FULL;
 /* Pack the 4-tuple into the 96-bit match value: sport and the low
  * half of shost in data[0], the high half of shost and dport in
  * data[1], and dhost in data[2].
  */
107 spec->data[0] = sport | shost << 16;
108 spec->data[1] = dport << 16 | shost >> 16;
109 spec->data[2] = dhost;
110}
111
112/**
113 * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
114 * @spec: Specification to initialise
115 * @dhost: Destination host address (host byte order)
116 * @dport: Destination port (host byte order)
117 */
118static inline void
119efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
120{
121 spec->type = EFX_FILTER_RX_TCP_WILD;
 /* Source fields are wildcarded (zero); dport occupies the top half
  * of data[1], matching the full-match layout above.
  */
122 spec->data[0] = 0;
123 spec->data[1] = dport << 16;
124 spec->data[2] = dhost;
125}
126
127/**
128 * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
129 * @spec: Specification to initialise
130 * @shost: Source host address (host byte order)
131 * @sport: Source port (host byte order)
132 * @dhost: Destination host address (host byte order)
133 * @dport: Destination port (host byte order)
134 */
135static inline void
136efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
137 u32 shost, u16 sport, u32 dhost, u16 dport)
138{
139 spec->type = EFX_FILTER_RX_UDP_FULL;
 /* Same 96-bit packing as the TCP full match: sport and low half of
  * shost in data[0], high half of shost and dport in data[1],
  * dhost in data[2].
  */
140 spec->data[0] = sport | shost << 16;
141 spec->data[1] = dport << 16 | shost >> 16;
142 spec->data[2] = dhost;
143}
144
145/**
146 * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
147 * @spec: Specification to initialise
148 * @dhost: Destination host address (host byte order)
149 * @dport: Destination port (host byte order)
150 */
151static inline void
152efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
153{
154 spec->type = EFX_FILTER_RX_UDP_WILD;
 /* NOTE(review): unlike the TCP wildcard match, dport lands in
  * data[0] (the source-port position) rather than data[1] —
  * presumably a hardware quirk of the UDP wildcard field layout;
  * confirm against the register specification before changing.
  */
155 spec->data[0] = dport;
156 spec->data[1] = 0;
157 spec->data[2] = dhost;
158}
159
160/**
161 * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
162 * @spec: Specification to initialise
163 * @vid: VLAN ID
164 * @addr: Destination MAC address
165 */
166static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
167 u16 vid, const u8 *addr)
168{
169 spec->type = EFX_FILTER_RX_MAC_FULL;
 /* data[0] holds the VLAN ID; the MAC address is split with the
  * last four octets in data[1] and the first two in data[2].
  */
170 spec->data[0] = vid;
171 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
172 spec->data[2] = addr[0] << 8 | addr[1];
173}
174
175/**
176 * efx_filter_set_rx_mac_wild - specify RX filter with MAC wildcard match
177 * @spec: Specification to initialise
178 * @addr: Destination MAC address
179 */
180static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
181 const u8 *addr)
182{
183 spec->type = EFX_FILTER_RX_MAC_WILD;
 /* data[0] is the VID field, wildcarded (zero) here; MAC address is
  * split as in the full match: last four octets in data[1], first
  * two in data[2].
  */
184 spec->data[0] = 0;
185 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
186 spec->data[2] = addr[0] << 8 | addr[1];
187}
188
189#endif /* EFX_FILTER_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index f1aa5f374890..6886cdf87c12 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,10 +13,8 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern struct efx_mac_operations falcon_gmac_operations;
17extern struct efx_mac_operations falcon_xmac_operations; 16extern struct efx_mac_operations falcon_xmac_operations;
18extern struct efx_mac_operations efx_mcdi_mac_operations; 17extern struct efx_mac_operations efx_mcdi_mac_operations;
19extern void falcon_reconfigure_xmac_core(struct efx_nic *efx);
20extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, 18extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
21 u32 dma_len, int enable, int clear); 19 u32 dma_len, int enable, int clear);
22 20
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 3912b8fed912..12cf910c2ce7 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -1093,8 +1093,8 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
1093 return rc; 1093 return rc;
1094} 1094}
1095 1095
1096int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, 1096static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1097 const u8 *mac, int *id_out) 1097 const u8 *mac, int *id_out)
1098{ 1098{
1099 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; 1099 u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
1100 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; 1100 u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index f1f89ad4075a..c792f1d65e48 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -121,8 +121,6 @@ extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
121extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 121extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
122extern int efx_mcdi_reset_port(struct efx_nic *efx); 122extern int efx_mcdi_reset_port(struct efx_nic *efx);
123extern int efx_mcdi_reset_mc(struct efx_nic *efx); 123extern int efx_mcdi_reset_mc(struct efx_nic *efx);
124extern int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
125 const u8 *mac, int *id_out);
126extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, 124extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
127 const u8 *mac, int *id_out); 125 const u8 *mac, int *id_out);
128extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); 126extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 0121e71702bf..c992742446b1 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -713,7 +713,8 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
713 return 0; 713 return 0;
714} 714}
715 715
716const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) 716static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
717 unsigned int index)
717{ 718{
718 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; 719 struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
719 720
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index eeaf0bd64bd3..98d946020429 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -286,46 +286,24 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
286 */ 286 */
287void efx_mdio_an_reconfigure(struct efx_nic *efx) 287void efx_mdio_an_reconfigure(struct efx_nic *efx)
288{ 288{
289 bool xnp = (efx->link_advertising & ADVERTISED_10000baseT_Full
290 || EFX_WORKAROUND_13204(efx));
291 int reg; 289 int reg;
292 290
293 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN)); 291 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
294 292
295 /* Set up the base page */ 293 /* Set up the base page */
296 reg = ADVERTISE_CSMA; 294 reg = ADVERTISE_CSMA | ADVERTISE_RESV;
297 if (efx->link_advertising & ADVERTISED_10baseT_Half)
298 reg |= ADVERTISE_10HALF;
299 if (efx->link_advertising & ADVERTISED_10baseT_Full)
300 reg |= ADVERTISE_10FULL;
301 if (efx->link_advertising & ADVERTISED_100baseT_Half)
302 reg |= ADVERTISE_100HALF;
303 if (efx->link_advertising & ADVERTISED_100baseT_Full)
304 reg |= ADVERTISE_100FULL;
305 if (xnp)
306 reg |= ADVERTISE_RESV;
307 else if (efx->link_advertising & (ADVERTISED_1000baseT_Half |
308 ADVERTISED_1000baseT_Full))
309 reg |= ADVERTISE_NPAGE;
310 if (efx->link_advertising & ADVERTISED_Pause) 295 if (efx->link_advertising & ADVERTISED_Pause)
311 reg |= ADVERTISE_PAUSE_CAP; 296 reg |= ADVERTISE_PAUSE_CAP;
312 if (efx->link_advertising & ADVERTISED_Asym_Pause) 297 if (efx->link_advertising & ADVERTISED_Asym_Pause)
313 reg |= ADVERTISE_PAUSE_ASYM; 298 reg |= ADVERTISE_PAUSE_ASYM;
314 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); 299 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
315 300
316 /* Set up the (extended) next page if necessary */ 301 /* Set up the (extended) next page */
317 if (efx->phy_op->set_npage_adv) 302 efx->phy_op->set_npage_adv(efx, efx->link_advertising);
318 efx->phy_op->set_npage_adv(efx, efx->link_advertising);
319 303
320 /* Enable and restart AN */ 304 /* Enable and restart AN */
321 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); 305 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
322 reg |= MDIO_AN_CTRL1_ENABLE; 306 reg |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART | MDIO_AN_CTRL1_XNP;
323 if (!(EFX_WORKAROUND_15195(efx) && LOOPBACK_EXTERNAL(efx)))
324 reg |= MDIO_AN_CTRL1_RESTART;
325 if (xnp)
326 reg |= MDIO_AN_CTRL1_XNP;
327 else
328 reg &= ~MDIO_AN_CTRL1_XNP;
329 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg); 307 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
330} 308}
331 309
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 152342dbff29..0a7e26d73b52 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -29,6 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/highmem.h> 30#include <linux/highmem.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/vmalloc.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33 34
34#include "enum.h" 35#include "enum.h"
@@ -386,11 +387,6 @@ extern const unsigned int efx_loopback_mode_max;
386#define LOOPBACK_MODE(efx) \ 387#define LOOPBACK_MODE(efx) \
387 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) 388 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
388 389
389extern const char *efx_interrupt_mode_names[];
390extern const unsigned int efx_interrupt_mode_max;
391#define INT_MODE(efx) \
392 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
393
394extern const char *efx_reset_type_names[]; 390extern const char *efx_reset_type_names[];
395extern const unsigned int efx_reset_type_max; 391extern const unsigned int efx_reset_type_max;
396#define RESET_TYPE(type) \ 392#define RESET_TYPE(type) \
@@ -405,8 +401,6 @@ enum efx_int_mode {
405}; 401};
406#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) 402#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
407 403
408#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
409
410enum nic_state { 404enum nic_state {
411 STATE_INIT = 0, 405 STATE_INIT = 0,
412 STATE_RUNNING = 1, 406 STATE_RUNNING = 1,
@@ -619,6 +613,8 @@ union efx_multicast_hash {
619 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; 613 efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
620}; 614};
621 615
616struct efx_filter_state;
617
622/** 618/**
623 * struct efx_nic - an Efx NIC 619 * struct efx_nic - an Efx NIC
624 * @name: Device name (net device name or bus id before net device registered) 620 * @name: Device name (net device name or bus id before net device registered)
@@ -799,6 +795,8 @@ struct efx_nic {
799 u64 loopback_modes; 795 u64 loopback_modes;
800 796
801 void *loopback_selftest; 797 void *loopback_selftest;
798
799 struct efx_filter_state *filter_state;
802}; 800};
803 801
804static inline int efx_dev_registered(struct efx_nic *efx) 802static inline int efx_dev_registered(struct efx_nic *efx)
@@ -982,7 +980,7 @@ static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
982static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, 980static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
983 unsigned int index) 981 unsigned int index)
984{ 982{
985 return (&rx_queue->buffer[index]); 983 return &rx_queue->buffer[index];
986} 984}
987 985
988/* Set bit in a little-endian bitfield */ 986/* Set bit in a little-endian bitfield */
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 6c5c0cefa9d8..41c36b9a4244 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -104,7 +104,7 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
104static inline efx_qword_t *efx_event(struct efx_channel *channel, 104static inline efx_qword_t *efx_event(struct efx_channel *channel,
105 unsigned int index) 105 unsigned int index)
106{ 106{
107 return (((efx_qword_t *) (channel->eventq.addr)) + index); 107 return ((efx_qword_t *) (channel->eventq.addr)) + index;
108} 108}
109 109
110/* See if an event is present 110/* See if an event is present
@@ -119,8 +119,8 @@ static inline efx_qword_t *efx_event(struct efx_channel *channel,
119 */ 119 */
120static inline int efx_event_present(efx_qword_t *event) 120static inline int efx_event_present(efx_qword_t *event)
121{ 121{
122 return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | 122 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
123 EFX_DWORD_IS_ALL_ONES(event->dword[1]))); 123 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
124} 124}
125 125
126static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 126static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
@@ -347,7 +347,7 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
347static inline efx_qword_t * 347static inline efx_qword_t *
348efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 348efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
349{ 349{
350 return (((efx_qword_t *) (tx_queue->txd.addr)) + index); 350 return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
351} 351}
352 352
353/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 353/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
@@ -502,7 +502,7 @@ void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
502static inline efx_qword_t * 502static inline efx_qword_t *
503efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 503efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
504{ 504{
505 return (((efx_qword_t *) (rx_queue->rxd.addr)) + index); 505 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
506} 506}
507 507
508/* This creates an entry in the RX descriptor queue */ 508/* This creates an entry in the RX descriptor queue */
@@ -653,7 +653,7 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
653} 653}
654 654
655/* Use HW to insert a SW defined event */ 655/* Use HW to insert a SW defined event */
656void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) 656static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
657{ 657{
658 efx_oword_t drv_ev_reg; 658 efx_oword_t drv_ev_reg;
659 659
@@ -1849,8 +1849,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1849 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), 1849 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1850 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), 1850 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1851 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), 1851 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1852 /* The register buffer is allocated with slab, so we can't 1852 /* We can't reasonably read all of the buffer table (up to 8MB!).
1853 * reasonably read all of the buffer table (up to 8MB!).
1854 * However this driver will only use a few entries. Reading 1853 * However this driver will only use a few entries. Reading
1855 * 1K entries allows for some expansion of queue count and 1854 * 1K entries allows for some expansion of queue count and
1856 * size before we need to change the version. */ 1855 * size before we need to change the version. */
@@ -1858,7 +1857,6 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1858 A, A, 8, 1024), 1857 A, A, 8, 1024),
1859 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, 1858 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1860 B, Z, 8, 1024), 1859 B, Z, 8, 1024),
1861 /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
1862 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), 1860 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1863 REGISTER_TABLE_BB_CZ(TIMER_TBL), 1861 REGISTER_TABLE_BB_CZ(TIMER_TBL),
1864 REGISTER_TABLE_BB_CZ(TX_PACE_TBL), 1862 REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -1868,6 +1866,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1868 REGISTER_TABLE_CZ(MC_TREG_SMEM), 1866 REGISTER_TABLE_CZ(MC_TREG_SMEM),
1869 /* MSIX_PBA_TABLE is not mapped */ 1867 /* MSIX_PBA_TABLE is not mapped */
1870 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ 1868 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
1869 REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1871}; 1870};
1872 1871
1873size_t efx_nic_get_regs_len(struct efx_nic *efx) 1872size_t efx_nic_get_regs_len(struct efx_nic *efx)
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 5bc26137257b..1dab609757fb 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -11,17 +11,12 @@
11#define EFX_PHY_H 11#define EFX_PHY_H
12 12
13/**************************************************************************** 13/****************************************************************************
14 * 10Xpress (SFX7101 and SFT9001) PHYs 14 * 10Xpress (SFX7101) PHY
15 */ 15 */
16extern struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops;
18 17
19extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); 18extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
20 19
21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
22 * to boot due to corrupt flash, or some other negative error code. */
23extern int sft9001_wait_boot(struct efx_nic *efx);
24
25/**************************************************************************** 20/****************************************************************************
26 * AMCC/Quake QT202x PHYs 21 * AMCC/Quake QT202x PHYs
27 */ 22 */
@@ -42,6 +37,17 @@ extern struct efx_phy_operations falcon_qt202x_phy_ops;
42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state); 37extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43 38
44/**************************************************************************** 39/****************************************************************************
40* Transwitch CX4 retimer
41*/
42extern struct efx_phy_operations falcon_txc_phy_ops;
43
44#define TXC_GPIO_DIR_INPUT 0
45#define TXC_GPIO_DIR_OUTPUT 1
46
47extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
48extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
49
50/****************************************************************************
45 * Siena managed PHYs 51 * Siena managed PHYs
46 */ 52 */
47extern struct efx_phy_operations efx_mcdi_phy_ops; 53extern struct efx_phy_operations efx_mcdi_phy_ops;
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 18a3be428348..96430ed81c36 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2893,6 +2893,20 @@
2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8 2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8
2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff 2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895 2895
2896/* RX_MAC_FILTER_TBL0 */
2897/* RMFT_DEST_MAC is wider than 32 bits */
2898#define FRF_CZ_RMFT_DEST_MAC_LO_LBN 12
2899#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
2900#define FRF_CZ_RMFT_DEST_MAC_HI_LBN 44
2901#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH 16
2902
2903/* TX_MAC_FILTER_TBL0 */
2904/* TMFT_SRC_MAC is wider than 32 bits */
2905#define FRF_CZ_TMFT_SRC_MAC_LO_LBN 12
2906#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
2907#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
2908#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
2909
2896/* DRIVER_EV */ 2910/* DRIVER_EV */
2897/* Sub-fields of an RX flush completion event */ 2911/* Sub-fields of an RX flush completion event */
2898#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 2912#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index da4473b71058..0ebfb99f1299 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -48,6 +48,16 @@ static const unsigned char payload_source[ETH_ALEN] = {
48static const char payload_msg[] = 48static const char payload_msg[] =
49 "Hello world! This is an Efx loopback test in progress!"; 49 "Hello world! This is an Efx loopback test in progress!";
50 50
51/* Interrupt mode names */
52static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
53static const char *efx_interrupt_mode_names[] = {
54 [EFX_INT_MODE_MSIX] = "MSI-X",
55 [EFX_INT_MODE_MSI] = "MSI",
56 [EFX_INT_MODE_LEGACY] = "legacy",
57};
58#define INT_MODE(efx) \
59 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
60
51/** 61/**
52 * efx_loopback_state - persistent state during a loopback selftest 62 * efx_loopback_state - persistent state during a loopback selftest
53 * @flush: Drop all packets in efx_loopback_rx_packet 63 * @flush: Drop all packets in efx_loopback_rx_packet
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 9f5368049694..45236f58a258 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -129,7 +129,7 @@ static int siena_probe_port(struct efx_nic *efx)
129 return 0; 129 return 0;
130} 130}
131 131
132void siena_remove_port(struct efx_nic *efx) 132static void siena_remove_port(struct efx_nic *efx)
133{ 133{
134 efx->phy_op->remove(efx); 134 efx->phy_op->remove(efx);
135 efx_nic_free_buffer(efx, &efx->stats_buffer); 135 efx_nic_free_buffer(efx, &efx->stats_buffer);
@@ -651,6 +651,6 @@ struct efx_nic_type siena_a0_nic_type = {
651 .tx_dc_base = 0x88000, 651 .tx_dc_base = 0x88000,
652 .rx_dc_base = 0x68000, 652 .rx_dc_base = 0x68000,
653 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 653 .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
654 NETIF_F_RXHASH), 654 NETIF_F_RXHASH | NETIF_F_NTUPLE),
655 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, 655 .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
656}; 656};
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 6791be90c2fe..1bc6c48c96ee 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -19,10 +19,7 @@
19#include "workarounds.h" 19#include "workarounds.h"
20#include "selftest.h" 20#include "selftest.h"
21 21
22/* We expect these MMDs to be in the package. SFT9001 also has a 22/* We expect these MMDs to be in the package. */
23 * clause 22 extension MMD, but since it doesn't have all the generic
24 * MMD registers it is pointless to include it here.
25 */
26#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ 23#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
27 MDIO_DEVS_PCS | \ 24 MDIO_DEVS_PCS | \
28 MDIO_DEVS_PHYXS | \ 25 MDIO_DEVS_PHYXS | \
@@ -33,12 +30,6 @@
33 (1 << LOOPBACK_PMAPMD) | \ 30 (1 << LOOPBACK_PMAPMD) | \
34 (1 << LOOPBACK_PHYXS_WS)) 31 (1 << LOOPBACK_PHYXS_WS))
35 32
36#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \
37 (1 << LOOPBACK_PHYXS) | \
38 (1 << LOOPBACK_PCS) | \
39 (1 << LOOPBACK_PMAPMD) | \
40 (1 << LOOPBACK_PHYXS_WS))
41
42/* We complain if we fail to see the link partner as 10G capable this many 33/* We complain if we fail to see the link partner as 10G capable this many
43 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 34 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
44 */ 35 */
@@ -50,9 +41,8 @@
50#define PMA_PMD_EXT_GMII_EN_WIDTH 1 41#define PMA_PMD_EXT_GMII_EN_WIDTH 1
51#define PMA_PMD_EXT_CLK_OUT_LBN 2 42#define PMA_PMD_EXT_CLK_OUT_LBN 2
52#define PMA_PMD_EXT_CLK_OUT_WIDTH 1 43#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
53#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 /* SFX7101 only */ 44#define PMA_PMD_LNPGA_POWERDOWN_LBN 8
54#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 45#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
55#define PMA_PMD_EXT_CLK312_LBN 8 /* SFT9001 only */
56#define PMA_PMD_EXT_CLK312_WIDTH 1 46#define PMA_PMD_EXT_CLK312_WIDTH 1
57#define PMA_PMD_EXT_LPOWER_LBN 12 47#define PMA_PMD_EXT_LPOWER_LBN 12
58#define PMA_PMD_EXT_LPOWER_WIDTH 1 48#define PMA_PMD_EXT_LPOWER_WIDTH 1
@@ -84,7 +74,6 @@
84#define PMA_PMD_LED_FLASH (3) 74#define PMA_PMD_LED_FLASH (3)
85#define PMA_PMD_LED_MASK 3 75#define PMA_PMD_LED_MASK 3
86/* All LEDs under hardware control */ 76/* All LEDs under hardware control */
87#define SFT9001_PMA_PMD_LED_DEFAULT 0
88/* Green and Amber under hardware control, Red off */ 77/* Green and Amber under hardware control, Red off */
89#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 78#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
90 79
@@ -98,31 +87,7 @@
98#define PMA_PMD_SPEED_LBN 4 87#define PMA_PMD_SPEED_LBN 4
99#define PMA_PMD_SPEED_WIDTH 4 88#define PMA_PMD_SPEED_WIDTH 4
100 89
101/* Cable diagnostics - SFT9001 only */ 90/* Misc register defines */
102#define PMA_PMD_CDIAG_CTRL_REG 49213
103#define CDIAG_CTRL_IMMED_LBN 15
104#define CDIAG_CTRL_BRK_LINK_LBN 12
105#define CDIAG_CTRL_IN_PROG_LBN 11
106#define CDIAG_CTRL_LEN_UNIT_LBN 10
107#define CDIAG_CTRL_LEN_METRES 1
108#define PMA_PMD_CDIAG_RES_REG 49174
109#define CDIAG_RES_A_LBN 12
110#define CDIAG_RES_B_LBN 8
111#define CDIAG_RES_C_LBN 4
112#define CDIAG_RES_D_LBN 0
113#define CDIAG_RES_WIDTH 4
114#define CDIAG_RES_OPEN 2
115#define CDIAG_RES_OK 1
116#define CDIAG_RES_INVALID 0
117/* Set of 4 registers for pairs A-D */
118#define PMA_PMD_CDIAG_LEN_REG 49175
119
120/* Serdes control registers - SFT9001 only */
121#define PMA_PMD_CSERDES_CTRL_REG 64258
122/* Set the 156.25 MHz output to 312.5 MHz to drive Falcon's XMAC */
123#define PMA_PMD_CSERDES_DEFAULT 0x000f
124
125/* Misc register defines - SFX7101 only */
126#define PCS_CLOCK_CTRL_REG 55297 91#define PCS_CLOCK_CTRL_REG 55297
127#define PLL312_RST_N_LBN 2 92#define PLL312_RST_N_LBN 2
128 93
@@ -185,121 +150,17 @@ struct tenxpress_phy_data {
185 int bad_lp_tries; 150 int bad_lp_tries;
186}; 151};
187 152
188static ssize_t show_phy_short_reach(struct device *dev,
189 struct device_attribute *attr, char *buf)
190{
191 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
192 int reg;
193
194 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR);
195 return sprintf(buf, "%d\n", !!(reg & MDIO_PMA_10GBT_TXPWR_SHORT));
196}
197
198static ssize_t set_phy_short_reach(struct device *dev,
199 struct device_attribute *attr,
200 const char *buf, size_t count)
201{
202 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
203 int rc;
204
205 rtnl_lock();
206 if (efx->state != STATE_RUNNING) {
207 rc = -EBUSY;
208 } else {
209 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
210 MDIO_PMA_10GBT_TXPWR_SHORT,
211 count != 0 && *buf != '0');
212 rc = efx_reconfigure_port(efx);
213 }
214 rtnl_unlock();
215
216 return rc < 0 ? rc : (ssize_t)count;
217}
218
219static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
220 set_phy_short_reach);
221
222int sft9001_wait_boot(struct efx_nic *efx)
223{
224 unsigned long timeout = jiffies + HZ + 1;
225 int boot_stat;
226
227 for (;;) {
228 boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
229 PCS_BOOT_STATUS_REG);
230 if (boot_stat >= 0) {
231 netif_dbg(efx, hw, efx->net_dev,
232 "PHY boot status = %#x\n", boot_stat);
233 switch (boot_stat &
234 ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
235 (3 << PCS_BOOT_PROGRESS_LBN) |
236 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
237 (1 << PCS_BOOT_CODE_STARTED_LBN))) {
238 case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
239 (PCS_BOOT_PROGRESS_CHECKSUM <<
240 PCS_BOOT_PROGRESS_LBN)):
241 case ((1 << PCS_BOOT_FATAL_ERROR_LBN) |
242 (PCS_BOOT_PROGRESS_INIT <<
243 PCS_BOOT_PROGRESS_LBN) |
244 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
245 return -EINVAL;
246 case ((PCS_BOOT_PROGRESS_WAIT_MDIO <<
247 PCS_BOOT_PROGRESS_LBN) |
248 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN)):
249 return (efx->phy_mode & PHY_MODE_SPECIAL) ?
250 0 : -EIO;
251 case ((PCS_BOOT_PROGRESS_JUMP <<
252 PCS_BOOT_PROGRESS_LBN) |
253 (1 << PCS_BOOT_CODE_STARTED_LBN)):
254 case ((PCS_BOOT_PROGRESS_JUMP <<
255 PCS_BOOT_PROGRESS_LBN) |
256 (1 << PCS_BOOT_DOWNLOAD_WAIT_LBN) |
257 (1 << PCS_BOOT_CODE_STARTED_LBN)):
258 return (efx->phy_mode & PHY_MODE_SPECIAL) ?
259 -EIO : 0;
260 default:
261 if (boot_stat & (1 << PCS_BOOT_FATAL_ERROR_LBN))
262 return -EIO;
263 break;
264 }
265 }
266
267 if (time_after_eq(jiffies, timeout))
268 return -ETIMEDOUT;
269
270 msleep(50);
271 }
272}
273
274static int tenxpress_init(struct efx_nic *efx) 153static int tenxpress_init(struct efx_nic *efx)
275{ 154{
276 int reg; 155 /* Enable 312.5 MHz clock */
277 156 efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
278 if (efx->phy_type == PHY_TYPE_SFX7101) { 157 1 << CLK312_EN_LBN);
279 /* Enable 312.5 MHz clock */
280 efx_mdio_write(efx, MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
281 1 << CLK312_EN_LBN);
282 } else {
283 /* Enable 312.5 MHz clock and GMII */
284 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
285 reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
286 (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
287 (1 << PMA_PMD_EXT_CLK312_LBN) |
288 (1 << PMA_PMD_EXT_ROBUST_LBN));
289
290 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
291 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT,
292 GPHY_XCONTROL_REG, 1 << GPHY_ISOLATE_LBN,
293 false);
294 }
295 158
296 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */ 159 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
297 if (efx->phy_type == PHY_TYPE_SFX7101) { 160 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
298 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, 161 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
299 1 << PMA_PMA_LED_ACTIVITY_LBN, true); 162 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
300 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, 163 SFX7101_PMA_PMD_LED_DEFAULT);
301 SFX7101_PMA_PMD_LED_DEFAULT);
302 }
303 164
304 return 0; 165 return 0;
305} 166}
@@ -307,7 +168,6 @@ static int tenxpress_init(struct efx_nic *efx)
307static int tenxpress_phy_probe(struct efx_nic *efx) 168static int tenxpress_phy_probe(struct efx_nic *efx)
308{ 169{
309 struct tenxpress_phy_data *phy_data; 170 struct tenxpress_phy_data *phy_data;
310 int rc;
311 171
312 /* Allocate phy private storage */ 172 /* Allocate phy private storage */
313 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 173 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
@@ -316,42 +176,15 @@ static int tenxpress_phy_probe(struct efx_nic *efx)
316 efx->phy_data = phy_data; 176 efx->phy_data = phy_data;
317 phy_data->phy_mode = efx->phy_mode; 177 phy_data->phy_mode = efx->phy_mode;
318 178
319 /* Create any special files */ 179 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
320 if (efx->phy_type == PHY_TYPE_SFT9001B) { 180 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
321 rc = device_create_file(&efx->pci_dev->dev,
322 &dev_attr_phy_short_reach);
323 if (rc)
324 goto fail;
325 }
326
327 if (efx->phy_type == PHY_TYPE_SFX7101) {
328 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
329 efx->mdio.mode_support = MDIO_SUPPORTS_C45;
330
331 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
332 181
333 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | 182 efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
334 ADVERTISED_10000baseT_Full);
335 } else {
336 efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
337 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
338 183
339 efx->loopback_modes = (SFT9001_LOOPBACKS | 184 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
340 FALCON_XMAC_LOOPBACKS | 185 ADVERTISED_10000baseT_Full);
341 FALCON_GMAC_LOOPBACKS);
342
343 efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
344 ADVERTISED_10000baseT_Full |
345 ADVERTISED_1000baseT_Full |
346 ADVERTISED_100baseT_Full);
347 }
348 186
349 return 0; 187 return 0;
350
351fail:
352 kfree(efx->phy_data);
353 efx->phy_data = NULL;
354 return rc;
355} 188}
356 189
357static int tenxpress_phy_init(struct efx_nic *efx) 190static int tenxpress_phy_init(struct efx_nic *efx)
@@ -361,16 +194,6 @@ static int tenxpress_phy_init(struct efx_nic *efx)
361 falcon_board(efx)->type->init_phy(efx); 194 falcon_board(efx)->type->init_phy(efx);
362 195
363 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { 196 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
364 if (efx->phy_type == PHY_TYPE_SFT9001A) {
365 int reg;
366 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
367 PMA_PMD_XCONTROL_REG);
368 reg |= (1 << PMA_PMD_EXT_SSR_LBN);
369 efx_mdio_write(efx, MDIO_MMD_PMAPMD,
370 PMA_PMD_XCONTROL_REG, reg);
371 mdelay(200);
372 }
373
374 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); 197 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
375 if (rc < 0) 198 if (rc < 0)
376 return rc; 199 return rc;
@@ -403,7 +226,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
403{ 226{
404 int rc, reg; 227 int rc, reg;
405 228
406 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 229 /* The XGMAC clock is driven from the SFX7101 312MHz clock, so
407 * a special software reset can glitch the XGMAC sufficiently for stats 230 * a special software reset can glitch the XGMAC sufficiently for stats
408 * requests to fail. */ 231 * requests to fail. */
409 falcon_stop_nic_stats(efx); 232 falcon_stop_nic_stats(efx);
@@ -484,53 +307,18 @@ static bool sfx7101_link_ok(struct efx_nic *efx)
484 MDIO_DEVS_PHYXS); 307 MDIO_DEVS_PHYXS);
485} 308}
486 309
487static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
488{
489 u32 reg;
490
491 if (efx_phy_mode_disabled(efx->phy_mode))
492 return false;
493 else if (efx->loopback_mode == LOOPBACK_GPHY)
494 return true;
495 else if (efx->loopback_mode)
496 return efx_mdio_links_ok(efx,
497 MDIO_DEVS_PMAPMD |
498 MDIO_DEVS_PHYXS);
499
500 /* We must use the same definition of link state as LASI,
501 * otherwise we can miss a link state transition
502 */
503 if (ecmd->speed == 10000) {
504 reg = efx_mdio_read(efx, MDIO_MMD_PCS, MDIO_PCS_10GBRT_STAT1);
505 return reg & MDIO_PCS_10GBRT_STAT1_BLKLK;
506 } else {
507 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_STATUS_REG);
508 return reg & (1 << C22EXT_STATUS_LINK_LBN);
509 }
510}
511
512static void tenxpress_ext_loopback(struct efx_nic *efx) 310static void tenxpress_ext_loopback(struct efx_nic *efx)
513{ 311{
514 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1, 312 efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, PHYXS_TEST1,
515 1 << LOOPBACK_NEAR_LBN, 313 1 << LOOPBACK_NEAR_LBN,
516 efx->loopback_mode == LOOPBACK_PHYXS); 314 efx->loopback_mode == LOOPBACK_PHYXS);
517 if (efx->phy_type != PHY_TYPE_SFX7101)
518 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, GPHY_XCONTROL_REG,
519 1 << GPHY_LOOPBACK_NEAR_LBN,
520 efx->loopback_mode == LOOPBACK_GPHY);
521} 315}
522 316
523static void tenxpress_low_power(struct efx_nic *efx) 317static void tenxpress_low_power(struct efx_nic *efx)
524{ 318{
525 if (efx->phy_type == PHY_TYPE_SFX7101) 319 efx_mdio_set_mmds_lpower(
526 efx_mdio_set_mmds_lpower( 320 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
527 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER), 321 TENXPRESS_REQUIRED_DEVS);
528 TENXPRESS_REQUIRED_DEVS);
529 else
530 efx_mdio_set_flag(
531 efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG,
532 1 << PMA_PMD_EXT_LPOWER_LBN,
533 !!(efx->phy_mode & PHY_MODE_LOW_POWER));
534} 322}
535 323
536static int tenxpress_phy_reconfigure(struct efx_nic *efx) 324static int tenxpress_phy_reconfigure(struct efx_nic *efx)
@@ -550,12 +338,7 @@ static int tenxpress_phy_reconfigure(struct efx_nic *efx)
550 338
551 if (loop_reset || phy_mode_change) { 339 if (loop_reset || phy_mode_change) {
552 tenxpress_special_reset(efx); 340 tenxpress_special_reset(efx);
553 341 falcon_reset_xaui(efx);
554 /* Reset XAUI if we were in 10G, and are staying
555 * in 10G. If we're moving into and out of 10G
556 * then xaui will be reset anyway */
557 if (EFX_IS10G(efx))
558 falcon_reset_xaui(efx);
559 } 342 }
560 343
561 tenxpress_low_power(efx); 344 tenxpress_low_power(efx);
@@ -578,29 +361,12 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
578{ 361{
579 struct efx_link_state old_state = efx->link_state; 362 struct efx_link_state old_state = efx->link_state;
580 363
581 if (efx->phy_type == PHY_TYPE_SFX7101) { 364 efx->link_state.up = sfx7101_link_ok(efx);
582 efx->link_state.up = sfx7101_link_ok(efx); 365 efx->link_state.speed = 10000;
583 efx->link_state.speed = 10000; 366 efx->link_state.fd = true;
584 efx->link_state.fd = true; 367 efx->link_state.fc = efx_mdio_get_pause(efx);
585 efx->link_state.fc = efx_mdio_get_pause(efx);
586
587 sfx7101_check_bad_lp(efx, efx->link_state.up);
588 } else {
589 struct ethtool_cmd ecmd;
590
591 /* Check the LASI alarm first */
592 if (efx->loopback_mode == LOOPBACK_NONE &&
593 !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
594 MDIO_PMA_LASI_LSALARM))
595 return false;
596 368
597 tenxpress_get_settings(efx, &ecmd); 369 sfx7101_check_bad_lp(efx, efx->link_state.up);
598
599 efx->link_state.up = sft9001_link_ok(efx, &ecmd);
600 efx->link_state.speed = ecmd.speed;
601 efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
602 efx->link_state.fc = efx_mdio_get_pause(efx);
603 }
604 370
605 return !efx_link_state_equal(&efx->link_state, &old_state); 371 return !efx_link_state_equal(&efx->link_state, &old_state);
606} 372}
@@ -621,10 +387,6 @@ static void sfx7101_phy_fini(struct efx_nic *efx)
621 387
622static void tenxpress_phy_remove(struct efx_nic *efx) 388static void tenxpress_phy_remove(struct efx_nic *efx)
623{ 389{
624 if (efx->phy_type == PHY_TYPE_SFT9001B)
625 device_remove_file(&efx->pci_dev->dev,
626 &dev_attr_phy_short_reach);
627
628 kfree(efx->phy_data); 390 kfree(efx->phy_data);
629 efx->phy_data = NULL; 391 efx->phy_data = NULL;
630} 392}
@@ -647,10 +409,7 @@ void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
647 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN); 409 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
648 break; 410 break;
649 default: 411 default:
650 if (efx->phy_type == PHY_TYPE_SFX7101) 412 reg = SFX7101_PMA_PMD_LED_DEFAULT;
651 reg = SFX7101_PMA_PMD_LED_DEFAULT;
652 else
653 reg = SFT9001_PMA_PMD_LED_DEFAULT;
654 break; 413 break;
655 } 414 }
656 415
@@ -685,102 +444,12 @@ sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
685 return rc; 444 return rc;
686} 445}
687 446
688static const char *const sft9001_test_names[] = {
689 "bist",
690 "cable.pairA.status",
691 "cable.pairB.status",
692 "cable.pairC.status",
693 "cable.pairD.status",
694 "cable.pairA.length",
695 "cable.pairB.length",
696 "cable.pairC.length",
697 "cable.pairD.length",
698};
699
700static const char *sft9001_test_name(struct efx_nic *efx, unsigned int index)
701{
702 if (index < ARRAY_SIZE(sft9001_test_names))
703 return sft9001_test_names[index];
704 return NULL;
705}
706
707static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
708{
709 int rc = 0, rc2, i, ctrl_reg, res_reg;
710
711 /* Initialise cable diagnostic results to unknown failure */
712 for (i = 1; i < 9; ++i)
713 results[i] = -1;
714
715 /* Run cable diagnostics; wait up to 5 seconds for them to complete.
716 * A cable fault is not a self-test failure, but a timeout is. */
717 ctrl_reg = ((1 << CDIAG_CTRL_IMMED_LBN) |
718 (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
719 if (flags & ETH_TEST_FL_OFFLINE) {
720 /* Break the link in order to run full diagnostics. We
721 * must reset the PHY to resume normal service. */
722 ctrl_reg |= (1 << CDIAG_CTRL_BRK_LINK_LBN);
723 }
724 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG,
725 ctrl_reg);
726 i = 0;
727 while (efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_CTRL_REG) &
728 (1 << CDIAG_CTRL_IN_PROG_LBN)) {
729 if (++i == 50) {
730 rc = -ETIMEDOUT;
731 goto out;
732 }
733 msleep(100);
734 }
735 res_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_CDIAG_RES_REG);
736 for (i = 0; i < 4; i++) {
737 int pair_res =
738 (res_reg >> (CDIAG_RES_A_LBN - i * CDIAG_RES_WIDTH))
739 & ((1 << CDIAG_RES_WIDTH) - 1);
740 int len_reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD,
741 PMA_PMD_CDIAG_LEN_REG + i);
742 if (pair_res == CDIAG_RES_OK)
743 results[1 + i] = 1;
744 else if (pair_res == CDIAG_RES_INVALID)
745 results[1 + i] = -1;
746 else
747 results[1 + i] = -pair_res;
748 if (pair_res != CDIAG_RES_INVALID &&
749 pair_res != CDIAG_RES_OPEN &&
750 len_reg != 0xffff)
751 results[5 + i] = len_reg;
752 }
753
754out:
755 if (flags & ETH_TEST_FL_OFFLINE) {
756 /* Reset, running the BIST and then resuming normal service. */
757 rc2 = tenxpress_special_reset(efx);
758 results[0] = rc2 ? -1 : 1;
759 if (!rc)
760 rc = rc2;
761
762 efx_mdio_an_reconfigure(efx);
763 }
764
765 return rc;
766}
767
768static void 447static void
769tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 448tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
770{ 449{
771 u32 adv = 0, lpa = 0; 450 u32 adv = 0, lpa = 0;
772 int reg; 451 int reg;
773 452
774 if (efx->phy_type != PHY_TYPE_SFX7101) {
775 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL);
776 if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
777 adv |= ADVERTISED_1000baseT_Full;
778 reg = efx_mdio_read(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_STATUS);
779 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
780 lpa |= ADVERTISED_1000baseT_Half;
781 if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
782 lpa |= ADVERTISED_1000baseT_Full;
783 }
784 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL); 453 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL);
785 if (reg & MDIO_AN_10GBT_CTRL_ADV10G) 454 if (reg & MDIO_AN_10GBT_CTRL_ADV10G)
786 adv |= ADVERTISED_10000baseT_Full; 455 adv |= ADVERTISED_10000baseT_Full;
@@ -790,23 +459,9 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
790 459
791 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 460 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
792 461
793 if (efx->phy_type != PHY_TYPE_SFX7101) {
794 ecmd->supported |= (SUPPORTED_100baseT_Full |
795 SUPPORTED_1000baseT_Full);
796 if (ecmd->speed != SPEED_10000) {
797 ecmd->eth_tp_mdix =
798 (efx_mdio_read(efx, MDIO_MMD_PMAPMD,
799 PMA_PMD_XSTATUS_REG) &
800 (1 << PMA_PMD_XSTAT_MDIX_LBN))
801 ? ETH_TP_MDI_X : ETH_TP_MDI;
802 }
803 }
804
805 /* In loopback, the PHY automatically brings up the correct interface, 462 /* In loopback, the PHY automatically brings up the correct interface,
806 * but doesn't advertise the correct speed. So override it */ 463 * but doesn't advertise the correct speed. So override it */
807 if (efx->loopback_mode == LOOPBACK_GPHY) 464 if (LOOPBACK_EXTERNAL(efx))
808 ecmd->speed = SPEED_1000;
809 else if (LOOPBACK_EXTERNAL(efx))
810 ecmd->speed = SPEED_10000; 465 ecmd->speed = SPEED_10000;
811} 466}
812 467
@@ -825,16 +480,6 @@ static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
825 advertising & ADVERTISED_10000baseT_Full); 480 advertising & ADVERTISED_10000baseT_Full);
826} 481}
827 482
828static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
829{
830 efx_mdio_set_flag(efx, MDIO_MMD_C22EXT, C22EXT_MSTSLV_CTRL,
831 1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
832 advertising & ADVERTISED_1000baseT_Full);
833 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
834 MDIO_AN_10GBT_CTRL_ADV10G,
835 advertising & ADVERTISED_10000baseT_Full);
836}
837
838struct efx_phy_operations falcon_sfx7101_phy_ops = { 483struct efx_phy_operations falcon_sfx7101_phy_ops = {
839 .probe = tenxpress_phy_probe, 484 .probe = tenxpress_phy_probe,
840 .init = tenxpress_phy_init, 485 .init = tenxpress_phy_init,
@@ -849,18 +494,3 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
849 .test_name = sfx7101_test_name, 494 .test_name = sfx7101_test_name,
850 .run_tests = sfx7101_run_tests, 495 .run_tests = sfx7101_run_tests,
851}; 496};
852
853struct efx_phy_operations falcon_sft9001_phy_ops = {
854 .probe = tenxpress_phy_probe,
855 .init = tenxpress_phy_init,
856 .reconfigure = tenxpress_phy_reconfigure,
857 .poll = tenxpress_phy_poll,
858 .fini = efx_port_dummy_op_void,
859 .remove = tenxpress_phy_remove,
860 .get_settings = tenxpress_get_settings,
861 .set_settings = tenxpress_set_settings,
862 .set_npage_adv = sft9001_set_npage_adv,
863 .test_alive = efx_mdio_test_alive,
864 .test_name = sft9001_test_name,
865 .run_tests = sft9001_run_tests,
866};
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
new file mode 100644
index 000000000000..351794a79215
--- /dev/null
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -0,0 +1,560 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006-2010 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*
11 * Driver for Transwitch/Mysticom CX4 retimer
12 * see www.transwitch.com, part is TXC-43128
13 */
14
15#include <linux/delay.h>
16#include <linux/slab.h>
17#include "efx.h"
18#include "mdio_10g.h"
19#include "phy.h"
20#include "nic.h"
21
22/* We expect these MMDs to be in the package */
23#define TXC_REQUIRED_DEVS (MDIO_DEVS_PCS | \
24 MDIO_DEVS_PMAPMD | \
25 MDIO_DEVS_PHYXS)
26
27#define TXC_LOOPBACKS ((1 << LOOPBACK_PCS) | \
28 (1 << LOOPBACK_PMAPMD) | \
29 (1 << LOOPBACK_PHYXS_WS))
30
31/**************************************************************************
32 *
33 * Compile-time config
34 *
35 **************************************************************************
36 */
37#define TXCNAME "TXC43128"
38/* Total length of time we'll wait for the PHY to come out of reset (ms) */
39#define TXC_MAX_RESET_TIME 500
40/* Interval between checks (ms) */
41#define TXC_RESET_WAIT 10
42/* How long to run BIST (us) */
43#define TXC_BIST_DURATION 50
44
45/**************************************************************************
46 *
47 * Register definitions
48 *
49 **************************************************************************
50 */
51
52/* Command register */
53#define TXC_GLRGS_GLCMD 0xc004
54/* Useful bits in command register */
55/* Lane power-down */
56#define TXC_GLCMD_L01PD_LBN 5
57#define TXC_GLCMD_L23PD_LBN 6
58/* Limited SW reset: preserves configuration but
59 * initiates a logic reset. Self-clearing */
60#define TXC_GLCMD_LMTSWRST_LBN 14
61
62/* Signal Quality Control */
63#define TXC_GLRGS_GSGQLCTL 0xc01a
64/* Enable bit */
65#define TXC_GSGQLCT_SGQLEN_LBN 15
66/* Lane selection */
67#define TXC_GSGQLCT_LNSL_LBN 13
68#define TXC_GSGQLCT_LNSL_WIDTH 2
69
70/* Analog TX control */
71#define TXC_ALRGS_ATXCTL 0xc040
72/* Lane power-down */
73#define TXC_ATXCTL_TXPD3_LBN 15
74#define TXC_ATXCTL_TXPD2_LBN 14
75#define TXC_ATXCTL_TXPD1_LBN 13
76#define TXC_ATXCTL_TXPD0_LBN 12
77
78/* Amplitude on lanes 0, 1 */
79#define TXC_ALRGS_ATXAMP0 0xc041
80/* Amplitude on lanes 2, 3 */
81#define TXC_ALRGS_ATXAMP1 0xc042
82/* Bit position of value for lane 0 (or 2) */
83#define TXC_ATXAMP_LANE02_LBN 3
84/* Bit position of value for lane 1 (or 3) */
85#define TXC_ATXAMP_LANE13_LBN 11
86
87#define TXC_ATXAMP_1280_mV 0
88#define TXC_ATXAMP_1200_mV 8
89#define TXC_ATXAMP_1120_mV 12
90#define TXC_ATXAMP_1060_mV 14
91#define TXC_ATXAMP_0820_mV 25
92#define TXC_ATXAMP_0720_mV 26
93#define TXC_ATXAMP_0580_mV 27
94#define TXC_ATXAMP_0440_mV 28
95
96#define TXC_ATXAMP_0820_BOTH \
97 ((TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE02_LBN) \
98 | (TXC_ATXAMP_0820_mV << TXC_ATXAMP_LANE13_LBN))
99
100#define TXC_ATXAMP_DEFAULT 0x6060 /* From databook */
101
102/* Preemphasis on lanes 0, 1 */
103#define TXC_ALRGS_ATXPRE0 0xc043
104/* Preemphasis on lanes 2, 3 */
105#define TXC_ALRGS_ATXPRE1 0xc044
106
107#define TXC_ATXPRE_NONE 0
108#define TXC_ATXPRE_DEFAULT 0x1010 /* From databook */
109
110#define TXC_ALRGS_ARXCTL 0xc045
111/* Lane power-down */
112#define TXC_ARXCTL_RXPD3_LBN 15
113#define TXC_ARXCTL_RXPD2_LBN 14
114#define TXC_ARXCTL_RXPD1_LBN 13
115#define TXC_ARXCTL_RXPD0_LBN 12
116
117/* Main control */
118#define TXC_MRGS_CTL 0xc340
119/* Bits in main control */
120#define TXC_MCTL_RESET_LBN 15 /* Self clear */
121#define TXC_MCTL_TXLED_LBN 14 /* 1 to show align status */
122#define TXC_MCTL_RXLED_LBN 13 /* 1 to show align status */
123
124/* GPIO output */
125#define TXC_GPIO_OUTPUT 0xc346
126#define TXC_GPIO_DIR 0xc348
127
128/* Vendor-specific BIST registers */
129#define TXC_BIST_CTL 0xc280
130#define TXC_BIST_TXFRMCNT 0xc281
131#define TXC_BIST_RX0FRMCNT 0xc282
132#define TXC_BIST_RX1FRMCNT 0xc283
133#define TXC_BIST_RX2FRMCNT 0xc284
134#define TXC_BIST_RX3FRMCNT 0xc285
135#define TXC_BIST_RX0ERRCNT 0xc286
136#define TXC_BIST_RX1ERRCNT 0xc287
137#define TXC_BIST_RX2ERRCNT 0xc288
138#define TXC_BIST_RX3ERRCNT 0xc289
139
140/* BIST type (controls bit patter in test) */
141#define TXC_BIST_CTRL_TYPE_LBN 10
142#define TXC_BIST_CTRL_TYPE_TSD 0 /* TranSwitch Deterministic */
143#define TXC_BIST_CTRL_TYPE_CRP 1 /* CRPAT standard */
144#define TXC_BIST_CTRL_TYPE_CJP 2 /* CJPAT standard */
145#define TXC_BIST_CTRL_TYPE_TSR 3 /* TranSwitch pseudo-random */
146/* Set this to 1 for 10 bit and 0 for 8 bit */
147#define TXC_BIST_CTRL_B10EN_LBN 12
148/* Enable BIST (write 0 to disable) */
149#define TXC_BIST_CTRL_ENAB_LBN 13
150/* Stop BIST (self-clears when stop complete) */
151#define TXC_BIST_CTRL_STOP_LBN 14
152/* Start BIST (cleared by writing 1 to STOP) */
153#define TXC_BIST_CTRL_STRT_LBN 15
154
155/* Mt. Diablo test configuration */
156#define TXC_MTDIABLO_CTRL 0xc34f
157#define TXC_MTDIABLO_CTRL_PMA_LOOP_LBN 10
158
/* Per-PHY private state, allocated in txc43128_phy_probe() */
struct txc43128_data {
	/* jiffies timestamp of the last bug-10934 link-down logic reset;
	 * refreshed whenever the link is up or we are in loopback */
	unsigned long bug10934_timer;
	/* phy_mode in force at the last reconfigure, for change detection */
	enum efx_phy_mode phy_mode;
	/* loopback_mode in force at the last reconfigure, for change detection */
	enum efx_loopback_mode loopback_mode;
};
164
165/* The PHY sometimes needs a reset to bring the link back up. So long as
166 * it reports link down, we reset it every 5 seconds.
167 */
168#define BUG10934_RESET_INTERVAL (5 * HZ)
169
170/* Perform a reset that doesn't clear configuration changes */
171static void txc_reset_logic(struct efx_nic *efx);
172
/* Set the output value of one GPIO pin (bit @pin of the PHY XS GPIO
 * output register) to @on */
void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int on)
{
	efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_OUTPUT, 1 << pin, on);
}
178
/* Set the direction of one GPIO pin (bit @pin of the PHY XS GPIO
 * direction register) to @dir */
void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir)
{
	efx_mdio_set_flag(efx, MDIO_MMD_PHYXS, TXC_GPIO_DIR, 1 << pin, dir);
}
184
185/* Reset the PMA/PMD MMD. The documentation is explicit that this does a
186 * global reset (it's less clear what reset of other MMDs does).*/
187static int txc_reset_phy(struct efx_nic *efx)
188{
189 int rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PMAPMD,
190 TXC_MAX_RESET_TIME / TXC_RESET_WAIT,
191 TXC_RESET_WAIT);
192 if (rc < 0)
193 goto fail;
194
195 /* Check that all the MMDs we expect are present and responding. */
196 rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0);
197 if (rc < 0)
198 goto fail;
199
200 return 0;
201
202fail:
203 netif_err(efx, hw, efx->net_dev, TXCNAME ": reset timed out!\n");
204 return rc;
205}
206
207/* Run a single BIST on one MMD */
208static int txc_bist_one(struct efx_nic *efx, int mmd, int test)
209{
210 int ctrl, bctl;
211 int lane;
212 int rc = 0;
213
214 /* Set PMA to test into loopback using Mt Diablo reg as per app note */
215 ctrl = efx_mdio_read(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL);
216 ctrl |= (1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
217 efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
218
219 /* The BIST app. note lists these as 3 distinct steps. */
220 /* Set the BIST type */
221 bctl = (test << TXC_BIST_CTRL_TYPE_LBN);
222 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
223
224 /* Set the BSTEN bit in the BIST Control register to enable */
225 bctl |= (1 << TXC_BIST_CTRL_ENAB_LBN);
226 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
227
228 /* Set the BSTRT bit in the BIST Control register */
229 efx_mdio_write(efx, mmd, TXC_BIST_CTL,
230 bctl | (1 << TXC_BIST_CTRL_STRT_LBN));
231
232 /* Wait. */
233 udelay(TXC_BIST_DURATION);
234
235 /* Set the BSTOP bit in the BIST Control register */
236 bctl |= (1 << TXC_BIST_CTRL_STOP_LBN);
237 efx_mdio_write(efx, mmd, TXC_BIST_CTL, bctl);
238
239 /* The STOP bit should go off when things have stopped */
240 while (bctl & (1 << TXC_BIST_CTRL_STOP_LBN))
241 bctl = efx_mdio_read(efx, mmd, TXC_BIST_CTL);
242
243 /* Check all the error counts are 0 and all the frame counts are
244 non-zero */
245 for (lane = 0; lane < 4; lane++) {
246 int count = efx_mdio_read(efx, mmd, TXC_BIST_RX0ERRCNT + lane);
247 if (count != 0) {
248 netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
249 "Lane %d had %d errs\n", lane, count);
250 rc = -EIO;
251 }
252 count = efx_mdio_read(efx, mmd, TXC_BIST_RX0FRMCNT + lane);
253 if (count == 0) {
254 netif_err(efx, hw, efx->net_dev, TXCNAME": BIST error. "
255 "Lane %d got 0 frames\n", lane);
256 rc = -EIO;
257 }
258 }
259
260 if (rc == 0)
261 netif_info(efx, hw, efx->net_dev, TXCNAME": BIST pass\n");
262
263 /* Disable BIST */
264 efx_mdio_write(efx, mmd, TXC_BIST_CTL, 0);
265
266 /* Turn off loopback */
267 ctrl &= ~(1 << TXC_MTDIABLO_CTRL_PMA_LOOP_LBN);
268 efx_mdio_write(efx, MDIO_MMD_PCS, TXC_MTDIABLO_CTRL, ctrl);
269
270 return rc;
271}
272
/* Run the one BIST we use: TranSwitch Deterministic pattern on the PCS */
static int txc_bist(struct efx_nic *efx)
{
	return txc_bist_one(efx, MDIO_MMD_PCS, TXC_BIST_CTRL_TYPE_TSD);
}
277
/* Push the non-configurable defaults into the PHY. This must be
 * done after every full reset */
static void txc_apply_defaults(struct efx_nic *efx)
{
	int mctrl;

	/* Turn amplitude down and preemphasis off on the host side
	 * (PHY<->MAC) as this is believed less likely to upset Falcon
	 * and no adverse effects have been noted. It probably also
	 * saves a picowatt or two */

	/* Turn off preemphasis */
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE0, TXC_ATXPRE_NONE);
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_ALRGS_ATXPRE1, TXC_ATXPRE_NONE);

	/* Turn down the amplitude */
	efx_mdio_write(efx, MDIO_MMD_PHYXS,
		       TXC_ALRGS_ATXAMP0, TXC_ATXAMP_0820_BOTH);
	efx_mdio_write(efx, MDIO_MMD_PHYXS,
		       TXC_ALRGS_ATXAMP1, TXC_ATXAMP_0820_BOTH);

	/* Set the line side amplitude and preemphasis to the databook
	 * defaults as an erratum causes them to be 0 on at least some
	 * PHY rev.s */
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXPRE0, TXC_ATXPRE_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXPRE1, TXC_ATXPRE_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXAMP0, TXC_ATXAMP_DEFAULT);
	efx_mdio_write(efx, MDIO_MMD_PMAPMD,
		       TXC_ALRGS_ATXAMP1, TXC_ATXAMP_DEFAULT);

	/* Set up the LEDs */
	mctrl = efx_mdio_read(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL);

	/* Set the Green and Red LEDs to their default modes */
	mctrl &= ~((1 << TXC_MCTL_TXLED_LBN) | (1 << TXC_MCTL_RXLED_LBN));
	efx_mdio_write(efx, MDIO_MMD_PHYXS, TXC_MRGS_CTL, mctrl);

	/* Databook recommends doing this after configuration changes */
	txc_reset_logic(efx);

	/* Let the board code finish any board-specific PHY setup */
	falcon_board(efx)->type->init_phy(efx);
}
323
324static int txc43128_phy_probe(struct efx_nic *efx)
325{
326 struct txc43128_data *phy_data;
327
328 /* Allocate phy private storage */
329 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
330 if (!phy_data)
331 return -ENOMEM;
332 efx->phy_data = phy_data;
333 phy_data->phy_mode = efx->phy_mode;
334
335 efx->mdio.mmds = TXC_REQUIRED_DEVS;
336 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
337
338 efx->loopback_modes = TXC_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
339
340 return 0;
341}
342
/* Initialisation entry point for this PHY driver: reset the PHY, run
 * the BIST, then push the default configuration.  Returns 0 on success
 * or a negative error code. */
static int txc43128_phy_init(struct efx_nic *efx)
{
	int rc;

	rc = txc_reset_phy(efx);
	if (rc >= 0)
		rc = txc_bist(efx);
	if (rc >= 0) {
		txc_apply_defaults(efx);
		rc = 0;
	}

	return rc;
}
360
361/* Set the lane power down state in the global registers */
362static void txc_glrgs_lane_power(struct efx_nic *efx, int mmd)
363{
364 int pd = (1 << TXC_GLCMD_L01PD_LBN) | (1 << TXC_GLCMD_L23PD_LBN);
365 int ctl = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
366
367 if (!(efx->phy_mode & PHY_MODE_LOW_POWER))
368 ctl &= ~pd;
369 else
370 ctl |= pd;
371
372 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, ctl);
373}
374
375/* Set the lane power down state in the analog control registers */
376static void txc_analog_lane_power(struct efx_nic *efx, int mmd)
377{
378 int txpd = (1 << TXC_ATXCTL_TXPD3_LBN) | (1 << TXC_ATXCTL_TXPD2_LBN)
379 | (1 << TXC_ATXCTL_TXPD1_LBN) | (1 << TXC_ATXCTL_TXPD0_LBN);
380 int rxpd = (1 << TXC_ARXCTL_RXPD3_LBN) | (1 << TXC_ARXCTL_RXPD2_LBN)
381 | (1 << TXC_ARXCTL_RXPD1_LBN) | (1 << TXC_ARXCTL_RXPD0_LBN);
382 int txctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ATXCTL);
383 int rxctl = efx_mdio_read(efx, mmd, TXC_ALRGS_ARXCTL);
384
385 if (!(efx->phy_mode & PHY_MODE_LOW_POWER)) {
386 txctl &= ~txpd;
387 rxctl &= ~rxpd;
388 } else {
389 txctl |= txpd;
390 rxctl |= rxpd;
391 }
392
393 efx_mdio_write(efx, mmd, TXC_ALRGS_ATXCTL, txctl);
394 efx_mdio_write(efx, mmd, TXC_ALRGS_ARXCTL, rxctl);
395}
396
/* Apply the low-power state selected by efx->phy_mode to all MMDs and
 * to both the global and analog lane controls */
static void txc_set_power(struct efx_nic *efx)
{
	/* According to the data book, all the MMDs can do low power */
	efx_mdio_set_mmds_lpower(efx,
				 !!(efx->phy_mode & PHY_MODE_LOW_POWER),
				 TXC_REQUIRED_DEVS);

	/* Global register bank is in PCS, PHY XS. These control the host
	 * side and line side settings respectively. */
	txc_glrgs_lane_power(efx, MDIO_MMD_PCS);
	txc_glrgs_lane_power(efx, MDIO_MMD_PHYXS);

	/* Analog register bank in PMA/PMD, PHY XS */
	txc_analog_lane_power(efx, MDIO_MMD_PMAPMD);
	txc_analog_lane_power(efx, MDIO_MMD_PHYXS);
}
413
414static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
415{
416 int val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
417 int tries = 50;
418
419 val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
420 efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
421 while (tries--) {
422 val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
423 if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
424 break;
425 udelay(1);
426 }
427 if (!tries)
428 netif_info(efx, hw, efx->net_dev,
429 TXCNAME " Logic reset timed out!\n");
430}
431
/* Perform a logic reset. This preserves the configuration registers
 * and is needed for some configuration changes to take effect */
static void txc_reset_logic(struct efx_nic *efx)
{
	/* The data sheet claims we can do the logic reset on either the
	 * PCS or the PHYXS and the result is a reset of both host- and
	 * line-side logic. */
	txc_reset_logic_mmd(efx, MDIO_MMD_PCS);
}
441
/* Link is up iff all the required MMDs (PMA/PMD, PCS, PHY XS) report
 * link up */
static bool txc43128_phy_read_link(struct efx_nic *efx)
{
	return efx_mdio_links_ok(efx, TXC_REQUIRED_DEVS);
}
446
/* Reconfigure the PHY to match the current efx->phy_mode and
 * efx->loopback_mode.  Always returns 0. */
static int txc43128_phy_reconfigure(struct efx_nic *efx)
{
	struct txc43128_data *phy_data = efx->phy_data;
	/* Bits that differ from the mode in force at the last reconfigure */
	enum efx_phy_mode mode_change = efx->phy_mode ^ phy_data->phy_mode;
	bool loop_change = LOOPBACK_CHANGED(phy_data, efx, TXC_LOOPBACKS);

	/* If TX has just been disabled, fully reset the PHY, reapply the
	 * defaults (lost by the reset) and reset the XAUI link; the
	 * TX_DISABLED change is then considered handled below */
	if (efx->phy_mode & mode_change & PHY_MODE_TX_DISABLED) {
		txc_reset_phy(efx);
		txc_apply_defaults(efx);
		falcon_reset_xaui(efx);
		mode_change &= ~PHY_MODE_TX_DISABLED;
	}

	efx_mdio_transmit_disable(efx);
	efx_mdio_phy_reconfigure(efx);
	if (mode_change & PHY_MODE_LOW_POWER)
		txc_set_power(efx);

	/* The data sheet claims this is required after every reconfiguration
	 * (note at end of 7.1), but we mustn't do it when nothing changes as
	 * it glitches the link, and reconfigure gets called on link change,
	 * so we get an IRQ storm on link up. */
	if (loop_change || mode_change)
		txc_reset_logic(efx);

	/* Remember the settings now in force for next time's change check */
	phy_data->phy_mode = efx->phy_mode;
	phy_data->loopback_mode = efx->loopback_mode;

	return 0;
}
477
/* Quiesce the PHY when the port is being shut down */
static void txc43128_phy_fini(struct efx_nic *efx)
{
	/* Disable link events */
	efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
483
/* Free the private state allocated by txc43128_phy_probe() */
static void txc43128_phy_remove(struct efx_nic *efx)
{
	kfree(efx->phy_data);
	efx->phy_data = NULL;
}
489
490/* Periodic callback: this exists mainly to poll link status as we
491 * don't use LASI interrupts */
492static bool txc43128_phy_poll(struct efx_nic *efx)
493{
494 struct txc43128_data *data = efx->phy_data;
495 bool was_up = efx->link_state.up;
496
497 efx->link_state.up = txc43128_phy_read_link(efx);
498 efx->link_state.speed = 10000;
499 efx->link_state.fd = true;
500 efx->link_state.fc = efx->wanted_fc;
501
502 if (efx->link_state.up || (efx->loopback_mode != LOOPBACK_NONE)) {
503 data->bug10934_timer = jiffies;
504 } else {
505 if (time_after_eq(jiffies, (data->bug10934_timer +
506 BUG10934_RESET_INTERVAL))) {
507 data->bug10934_timer = jiffies;
508 txc_reset_logic(efx);
509 }
510 }
511
512 return efx->link_state.up != was_up;
513}
514
515static const char *txc43128_test_names[] = {
516 "bist"
517};
518
519static const char *txc43128_test_name(struct efx_nic *efx, unsigned int index)
520{
521 if (index < ARRAY_SIZE(txc43128_test_names))
522 return txc43128_test_names[index];
523 return NULL;
524}
525
/* Run the PHY self-tests.  Only the offline (link-disrupting) BIST is
 * implemented; results[0] is set to 1 on pass or -1 on failure.
 * Returns 0, or a negative error code. */
static int txc43128_run_tests(struct efx_nic *efx, int *results, unsigned flags)
{
	int rc;

	/* Nothing to do unless offline testing was requested */
	if (!(flags & ETH_TEST_FL_OFFLINE))
		return 0;

	rc = txc_reset_phy(efx);
	if (rc < 0)
		return rc;

	rc = txc_bist(efx);
	/* The reset clobbered the configuration; restore the defaults
	 * before reporting the result */
	txc_apply_defaults(efx);
	results[0] = rc ? -1 : 1;
	return rc;
}
542
/* Fill in ethtool settings from the standard clause 45 MDIO registers */
static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
	mdio45_ethtool_gset(&efx->mdio, ecmd);
}
547
/* PHY operations vtable for the TXC43128 CX4 retimer, referenced by
 * the Falcon board support code */
struct efx_phy_operations falcon_txc_phy_ops = {
	.probe = txc43128_phy_probe,
	.init = txc43128_phy_init,
	.reconfigure = txc43128_phy_reconfigure,
	.poll = txc43128_phy_poll,
	.fini = txc43128_phy_fini,
	.remove = txc43128_phy_remove,
	.get_settings = txc43128_get_settings,
	.set_settings = efx_mdio_set_settings,
	.test_alive = efx_mdio_test_alive,
	.run_tests = txc43128_run_tests,
	.test_name = txc43128_test_name,
};
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 782e45a613d6..e0d63083c3a8 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -19,9 +19,7 @@
19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) 20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) 21#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
22#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) 22#define EFX_WORKAROUND_10G(efx) 1
23#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
24 (efx)->phy_type == PHY_TYPE_SFT9001B)
25 23
26/* XAUI resets if link not detected */ 24/* XAUI resets if link not detected */
27#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -58,9 +56,4 @@
58/* Leak overlength packets rather than free */ 56/* Leak overlength packets rather than free */
59#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 57#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
60 58
61/* Need to send XNP pages for 100BaseT */
62#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
63/* Don't restart AN in near-side loopback */
64#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
65
66#endif /* EFX_WORKAROUNDS_H */ 59#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c65f8b..9265315baa0b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
804err_out_free_page: 804err_out_free_page:
805 free_page((unsigned long) sp->srings); 805 free_page((unsigned long) sp->srings);
806err_out_free_dev: 806err_out_free_dev:
807 kfree(dev); 807 free_netdev(dev);
808 808
809err_out: 809err_out:
810 return err; 810 return err;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index ffdd8591d4bc..581836867098 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -832,7 +832,7 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
832 outl(0, ee_addr); 832 outl(0, ee_addr);
833 eeprom_delay(); 833 eeprom_delay();
834 834
835 return (retval); 835 return retval;
836} 836}
837 837
838/* Read and write the MII management registers using software-generated 838/* Read and write the MII management registers using software-generated
@@ -2247,9 +2247,9 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2247 2247
2248 /* leave 8 or 7 most siginifant bits */ 2248 /* leave 8 or 7 most siginifant bits */
2249 if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV)) 2249 if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
2250 return ((int)(crc >> 24)); 2250 return (int)(crc >> 24);
2251 else 2251 else
2252 return ((int)(crc >> 25)); 2252 return (int)(crc >> 25);
2253} 2253}
2254 2254
2255/** 2255/**
diff --git a/drivers/net/skfp/cfm.c b/drivers/net/skfp/cfm.c
index 5310d39b5737..e395ace3120b 100644
--- a/drivers/net/skfp/cfm.c
+++ b/drivers/net/skfp/cfm.c
@@ -542,8 +542,8 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
542 */ 542 */
543int cfm_get_mac_input(struct s_smc *smc) 543int cfm_get_mac_input(struct s_smc *smc)
544{ 544{
545 return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B || 545 return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
546 smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA) ; 546 smc->mib.fddiSMTCF_State == SC5_THRU_B) ? PB : PA;
547} 547}
548 548
549/* 549/*
@@ -553,8 +553,8 @@ int cfm_get_mac_input(struct s_smc *smc)
553 */ 553 */
554int cfm_get_mac_output(struct s_smc *smc) 554int cfm_get_mac_output(struct s_smc *smc)
555{ 555{
556 return((smc->mib.fddiSMTCF_State == SC10_C_WRAP_B || 556 return (smc->mib.fddiSMTCF_State == SC10_C_WRAP_B ||
557 smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA) ; 557 smc->mib.fddiSMTCF_State == SC4_THRU_A) ? PB : PA;
558} 558}
559 559
560static char path_iso[] = { 560static char path_iso[] = {
@@ -623,5 +623,5 @@ int cem_build_path(struct s_smc *smc, char *to, int path_index)
623 623
624 LINT_USE(path_index); 624 LINT_USE(path_index);
625 625
626 return(len) ; 626 return len;
627} 627}
diff --git a/drivers/net/skfp/drvfbi.c b/drivers/net/skfp/drvfbi.c
index c77cc14b3227..07da97c303d6 100644
--- a/drivers/net/skfp/drvfbi.c
+++ b/drivers/net/skfp/drvfbi.c
@@ -267,7 +267,7 @@ void timer_irq(struct s_smc *smc)
267int pcm_get_s_port(struct s_smc *smc) 267int pcm_get_s_port(struct s_smc *smc)
268{ 268{
269 SK_UNUSED(smc) ; 269 SK_UNUSED(smc) ;
270 return(PS) ; 270 return PS;
271} 271}
272 272
273/* 273/*
@@ -366,7 +366,7 @@ void sm_pm_bypass_req(struct s_smc *smc, int mode)
366 */ 366 */
367int sm_pm_bypass_present(struct s_smc *smc) 367int sm_pm_bypass_present(struct s_smc *smc)
368{ 368{
369 return( (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE: FALSE) ; 369 return (inp(ADDR(B0_DAS)) & DAS_BYP_ST) ? TRUE : FALSE;
370} 370}
371 371
372void plc_clear_irq(struct s_smc *smc, int p) 372void plc_clear_irq(struct s_smc *smc, int p)
@@ -483,9 +483,9 @@ static int is_equal_num(char comp1[], char comp2[], int num)
483 483
484 for (i = 0 ; i < num ; i++) { 484 for (i = 0 ; i < num ; i++) {
485 if (comp1[i] != comp2[i]) 485 if (comp1[i] != comp2[i])
486 return (0) ; 486 return 0;
487 } 487 }
488 return (1) ; 488 return 1;
489} /* is_equal_num */ 489} /* is_equal_num */
490 490
491 491
@@ -522,18 +522,18 @@ int set_oi_id_def(struct s_smc *smc)
522 i++ ; 522 i++ ;
523 break ; /* entry ok */ 523 break ; /* entry ok */
524 default: 524 default:
525 return (1) ; /* invalid oi_status */ 525 return 1; /* invalid oi_status */
526 } 526 }
527 } 527 }
528 528
529 if (i == 0) 529 if (i == 0)
530 return (2) ; 530 return 2;
531 if (!act_entries) 531 if (!act_entries)
532 return (3) ; 532 return 3;
533 533
534 /* ok, we have a valid OEM data base with an active entry */ 534 /* ok, we have a valid OEM data base with an active entry */
535 smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ; 535 smc->hw.oem_id = (struct s_oem_ids *) &oem_ids[sel_id] ;
536 return (0) ; 536 return 0;
537} 537}
538#endif /* MULT_OEM */ 538#endif /* MULT_OEM */
539 539
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index e8387d25f24a..8639a0884f5c 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -135,7 +135,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
135 */ 135 */
136 if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) { 136 if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
137 DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ; 137 DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ;
138 return(fs) ; 138 return fs;
139 } 139 }
140 msg_res_type = ((struct smt_p_0015 *)p)->res_type ; 140 msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
141 141
@@ -147,7 +147,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
147 * error in frame: para ESS command was not found 147 * error in frame: para ESS command was not found
148 */ 148 */
149 DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0); 149 DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0);
150 return(fs) ; 150 return fs;
151 } 151 }
152 152
153 DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ; 153 DB_ESSN(2,"fc %x ft %x\n",sm->smt_class,sm->smt_type) ;
@@ -175,12 +175,12 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
175 * local and no static allocation is used 175 * local and no static allocation is used
176 */ 176 */
177 if (!local || smc->mib.fddiESSPayload) 177 if (!local || smc->mib.fddiESSPayload)
178 return(fs) ; 178 return fs;
179 179
180 p = (void *) sm_to_para(smc,sm,SMT_P0019) ; 180 p = (void *) sm_to_para(smc,sm,SMT_P0019) ;
181 for (i = 0; i < 5; i++) { 181 for (i = 0; i < 5; i++) {
182 if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) { 182 if (((struct smt_p_0019 *)p)->alloc_addr.a[i]) {
183 return(fs) ; 183 return fs;
184 } 184 }
185 } 185 }
186 186
@@ -199,10 +199,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
199 sm->smt_dest = smt_sba_da ; 199 sm->smt_dest = smt_sba_da ;
200 200
201 if (smc->ess.local_sba_active) 201 if (smc->ess.local_sba_active)
202 return(fs | I_INDICATOR) ; 202 return fs | I_INDICATOR;
203 203
204 if (!(db = smt_get_mbuf(smc))) 204 if (!(db = smt_get_mbuf(smc)))
205 return(fs) ; 205 return fs;
206 206
207 db->sm_len = mb->sm_len ; 207 db->sm_len = mb->sm_len ;
208 db->sm_off = mb->sm_off ; 208 db->sm_off = mb->sm_off ;
@@ -212,7 +212,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
212 (struct smt_header *)(db->sm_data+db->sm_off), 212 (struct smt_header *)(db->sm_data+db->sm_off),
213 "RAF") ; 213 "RAF") ;
214 smt_send_frame(smc,db,FC_SMT_INFO,0) ; 214 smt_send_frame(smc,db,FC_SMT_INFO,0) ;
215 return(fs) ; 215 return fs;
216 } 216 }
217 217
218 /* 218 /*
@@ -221,7 +221,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
221 */ 221 */
222 if (smt_check_para(smc,sm,plist_raf_alc_res)) { 222 if (smt_check_para(smc,sm,plist_raf_alc_res)) {
223 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; 223 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
224 return(fs) ; 224 return fs;
225 } 225 }
226 226
227 /* 227 /*
@@ -242,7 +242,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
242 (sm->smt_tid != smc->ess.alloc_trans_id)) { 242 (sm->smt_tid != smc->ess.alloc_trans_id)) {
243 243
244 DB_ESS("ESS: Allocation Responce not accepted\n",0,0) ; 244 DB_ESS("ESS: Allocation Responce not accepted\n",0,0) ;
245 return(fs) ; 245 return fs;
246 } 246 }
247 247
248 /* 248 /*
@@ -268,7 +268,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
268 */ 268 */
269 (void)process_bw_alloc(smc,(long)payload,(long)overhead) ; 269 (void)process_bw_alloc(smc,(long)payload,(long)overhead) ;
270 270
271 return(fs) ; 271 return fs;
272 /* end of Process Allocation Request */ 272 /* end of Process Allocation Request */
273 273
274 /* 274 /*
@@ -280,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
280 */ 280 */
281 if (sm->smt_type != SMT_REQUEST) { 281 if (sm->smt_type != SMT_REQUEST) {
282 DB_ESS("ESS: Do not process Change Responses\n",0,0) ; 282 DB_ESS("ESS: Do not process Change Responses\n",0,0) ;
283 return(fs) ; 283 return fs;
284 } 284 }
285 285
286 /* 286 /*
@@ -288,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
288 */ 288 */
289 if (smt_check_para(smc,sm,plist_raf_chg_req)) { 289 if (smt_check_para(smc,sm,plist_raf_chg_req)) {
290 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ; 290 DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
291 return(fs) ; 291 return fs;
292 } 292 }
293 293
294 /* 294 /*
@@ -300,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
300 if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index 300 if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
301 != PRIMARY_RING) || (msg_res_type != SYNC_BW)) { 301 != PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
302 DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ; 302 DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ;
303 return(fs) ; 303 return fs;
304 } 304 }
305 305
306 /* 306 /*
@@ -319,14 +319,14 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
319 * process the bandwidth allocation 319 * process the bandwidth allocation
320 */ 320 */
321 if(!process_bw_alloc(smc,(long)payload,(long)overhead)) 321 if(!process_bw_alloc(smc,(long)payload,(long)overhead))
322 return(fs) ; 322 return fs;
323 323
324 /* 324 /*
325 * send an RAF Change Reply 325 * send an RAF Change Reply
326 */ 326 */
327 ess_send_response(smc,sm,CHANGE_ALLOCATION) ; 327 ess_send_response(smc,sm,CHANGE_ALLOCATION) ;
328 328
329 return(fs) ; 329 return fs;
330 /* end of Process Change Request */ 330 /* end of Process Change Request */
331 331
332 /* 332 /*
@@ -338,7 +338,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
338 */ 338 */
339 if (sm->smt_type != SMT_REQUEST) { 339 if (sm->smt_type != SMT_REQUEST) {
340 DB_ESS("ESS: Do not process a Report Reply\n",0,0) ; 340 DB_ESS("ESS: Do not process a Report Reply\n",0,0) ;
341 return(fs) ; 341 return fs;
342 } 342 }
343 343
344 DB_ESSN(2,"ESS: Report Request from %s\n", 344 DB_ESSN(2,"ESS: Report Request from %s\n",
@@ -349,7 +349,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
349 */ 349 */
350 if (msg_res_type != SYNC_BW) { 350 if (msg_res_type != SYNC_BW) {
351 DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ; 351 DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ;
352 return(fs) ; 352 return fs;
353 } 353 }
354 354
355 /* 355 /*
@@ -357,7 +357,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
357 */ 357 */
358 ess_send_response(smc,sm,REPORT_ALLOCATION) ; 358 ess_send_response(smc,sm,REPORT_ALLOCATION) ;
359 359
360 return(fs) ; 360 return fs;
361 /* end of Process Report Request */ 361 /* end of Process Report Request */
362 362
363 default: 363 default:
@@ -368,7 +368,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
368 break ; 368 break ;
369 } 369 }
370 370
371 return(fs) ; 371 return fs;
372} 372}
373 373
374/* 374/*
@@ -418,17 +418,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
418 */ 418 */
419/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) { 419/* if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
420 DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ; 420 DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ;
421 return(FALSE) ; 421 return FALSE;
422 } 422 }
423 if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) { 423 if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
424 DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ; 424 DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ;
425 return(FALSE) ; 425 return FALSE;
426 } */ 426 } */
427 427
428 /* premliminary */ 428 /* premliminary */
429 if (payload > MAX_PAYLOAD || overhead > 5000) { 429 if (payload > MAX_PAYLOAD || overhead > 5000) {
430 DB_ESS("ESS: payload / overhead not accepted\n",0,0) ; 430 DB_ESS("ESS: payload / overhead not accepted\n",0,0) ;
431 return(FALSE) ; 431 return FALSE;
432 } 432 }
433 433
434 /* 434 /*
@@ -468,7 +468,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
468 468
469 ess_config_fifo(smc) ; 469 ess_config_fifo(smc) ;
470 set_formac_tsync(smc,smc->ess.sync_bw) ; 470 set_formac_tsync(smc,smc->ess.sync_bw) ;
471 return(TRUE) ; 471 return TRUE;
472} 472}
473 473
474static void ess_send_response(struct s_smc *smc, struct smt_header *sm, 474static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 9d8d1ac48176..ca4e7bb6a5a8 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -112,8 +112,8 @@ static u_long mac_get_tneg(struct s_smc *smc)
112 u_long tneg ; 112 u_long tneg ;
113 113
114 tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ; 114 tneg = (u_long)((long)inpw(FM_A(FM_TNEG))<<5) ;
115 return((u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) | 115 return (u_long)((tneg + ((inpw(FM_A(FM_TMRS))>>10)&0x1f)) |
116 0xffe00000L)) ; 116 0xffe00000L) ;
117} 117}
118 118
119void mac_update_counter(struct s_smc *smc) 119void mac_update_counter(struct s_smc *smc)
@@ -163,7 +163,7 @@ static u_long read_mdr(struct s_smc *smc, unsigned int addr)
163 /* is used */ 163 /* is used */
164 p = (u_long)inpw(FM_A(FM_MDRU))<<16 ; 164 p = (u_long)inpw(FM_A(FM_MDRU))<<16 ;
165 p += (u_long)inpw(FM_A(FM_MDRL)) ; 165 p += (u_long)inpw(FM_A(FM_MDRL)) ;
166 return(p) ; 166 return p;
167} 167}
168#endif 168#endif
169 169
@@ -887,7 +887,7 @@ int init_fplus(struct s_smc *smc)
887 /* make sure all PCI settings are correct */ 887 /* make sure all PCI settings are correct */
888 mac_do_pci_fix(smc) ; 888 mac_do_pci_fix(smc) ;
889 889
890 return(init_mac(smc,1)) ; 890 return init_mac(smc, 1);
891 /* enable_formac(smc) ; */ 891 /* enable_formac(smc) ; */
892} 892}
893 893
@@ -989,7 +989,7 @@ static int init_mac(struct s_smc *smc, int all)
989 } 989 }
990 smc->hw.hw_state = STARTED ; 990 smc->hw.hw_state = STARTED ;
991 991
992 return(0) ; 992 return 0;
993} 993}
994 994
995 995
@@ -1049,7 +1049,7 @@ void sm_ma_control(struct s_smc *smc, int mode)
1049 1049
1050int sm_mac_get_tx_state(struct s_smc *smc) 1050int sm_mac_get_tx_state(struct s_smc *smc)
1051{ 1051{
1052 return((inpw(FM_A(FM_STMCHN))>>4)&7) ; 1052 return (inpw(FM_A(FM_STMCHN))>>4) & 7;
1053} 1053}
1054 1054
1055/* 1055/*
@@ -1084,9 +1084,9 @@ static struct s_fpmc* mac_get_mc_table(struct s_smc *smc,
1084 } 1084 }
1085 if (memcmp((char *)&tb->a,(char *)own,6)) 1085 if (memcmp((char *)&tb->a,(char *)own,6))
1086 continue ; 1086 continue ;
1087 return(tb) ; 1087 return tb;
1088 } 1088 }
1089 return(slot) ; /* return first free or NULL */ 1089 return slot; /* return first free or NULL */
1090} 1090}
1091 1091
1092/* 1092/*
@@ -1152,12 +1152,12 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
1152 */ 1152 */
1153 if (can & 0x80) { 1153 if (can & 0x80) {
1154 if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) { 1154 if (smc->hw.fp.smt_slots_used >= SMT_MAX_MULTI) {
1155 return(1) ; 1155 return 1;
1156 } 1156 }
1157 } 1157 }
1158 else { 1158 else {
1159 if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) { 1159 if (smc->hw.fp.os_slots_used >= FPMAX_MULTICAST-SMT_MAX_MULTI) {
1160 return(1) ; 1160 return 1;
1161 } 1161 }
1162 } 1162 }
1163 1163
@@ -1165,7 +1165,7 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
1165 * find empty slot 1165 * find empty slot
1166 */ 1166 */
1167 if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80))) 1167 if (!(tb = mac_get_mc_table(smc,addr,&own,0,can & ~0x80)))
1168 return(1) ; 1168 return 1;
1169 tb->n++ ; 1169 tb->n++ ;
1170 tb->a = own ; 1170 tb->a = own ;
1171 tb->perm = (can & 0x80) ? 1 : 0 ; 1171 tb->perm = (can & 0x80) ? 1 : 0 ;
@@ -1175,7 +1175,7 @@ int mac_add_multicast(struct s_smc *smc, struct fddi_addr *addr, int can)
1175 else 1175 else
1176 smc->hw.fp.os_slots_used++ ; 1176 smc->hw.fp.os_slots_used++ ;
1177 1177
1178 return(0) ; 1178 return 0;
1179} 1179}
1180 1180
1181/* 1181/*
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index d322f1b702ac..af5a755e269d 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -232,16 +232,16 @@ u_int mac_drv_check_space(void)
232#ifdef COMMON_MB_POOL 232#ifdef COMMON_MB_POOL
233 call_count++ ; 233 call_count++ ;
234 if (call_count == 1) { 234 if (call_count == 1) {
235 return(EXT_VIRT_MEM) ; 235 return EXT_VIRT_MEM;
236 } 236 }
237 else { 237 else {
238 return(EXT_VIRT_MEM_2) ; 238 return EXT_VIRT_MEM_2;
239 } 239 }
240#else 240#else
241 return (EXT_VIRT_MEM) ; 241 return EXT_VIRT_MEM;
242#endif 242#endif
243#else 243#else
244 return (0) ; 244 return 0;
245#endif 245#endif
246} 246}
247 247
@@ -271,7 +271,7 @@ int mac_drv_init(struct s_smc *smc)
271 if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *) 271 if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *)
272 mac_drv_get_desc_mem(smc,(u_int) 272 mac_drv_get_desc_mem(smc,(u_int)
273 (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) { 273 (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) {
274 return(1) ; /* no space the hwm modul can't work */ 274 return 1; /* no space the hwm modul can't work */
275 } 275 }
276 276
277 /* 277 /*
@@ -283,18 +283,18 @@ int mac_drv_init(struct s_smc *smc)
283#ifndef COMMON_MB_POOL 283#ifndef COMMON_MB_POOL
284 if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc, 284 if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc,
285 MAX_MBUF*sizeof(SMbuf)))) { 285 MAX_MBUF*sizeof(SMbuf)))) {
286 return(1) ; /* no space the hwm modul can't work */ 286 return 1; /* no space the hwm modul can't work */
287 } 287 }
288#else 288#else
289 if (!mb_start) { 289 if (!mb_start) {
290 if (!(mb_start = (SMbuf *) mac_drv_get_space(smc, 290 if (!(mb_start = (SMbuf *) mac_drv_get_space(smc,
291 MAX_MBUF*sizeof(SMbuf)))) { 291 MAX_MBUF*sizeof(SMbuf)))) {
292 return(1) ; /* no space the hwm modul can't work */ 292 return 1; /* no space the hwm modul can't work */
293 } 293 }
294 } 294 }
295#endif 295#endif
296#endif 296#endif
297 return (0) ; 297 return 0;
298} 298}
299 299
300/* 300/*
@@ -349,7 +349,7 @@ static u_long init_descr_ring(struct s_smc *smc,
349 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; 349 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
350 d1++; 350 d1++;
351 } 351 }
352 return(phys) ; 352 return phys;
353} 353}
354 354
355static void init_txd_ring(struct s_smc *smc) 355static void init_txd_ring(struct s_smc *smc)
@@ -502,7 +502,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc)
502 mb->sm_use_count = 1 ; 502 mb->sm_use_count = 1 ;
503 } 503 }
504 DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ; 504 DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
505 return (mb) ; /* May be NULL */ 505 return mb; /* May be NULL */
506} 506}
507 507
508void smt_free_mbuf(struct s_smc *smc, SMbuf *mb) 508void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
@@ -621,7 +621,7 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
621 t = t->txd_next ; 621 t = t->txd_next ;
622 tx_used-- ; 622 tx_used-- ;
623 } 623 }
624 return(phys) ; 624 return phys;
625} 625}
626 626
627/* 627/*
@@ -673,7 +673,7 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
673 r = r->rxd_next ; 673 r = r->rxd_next ;
674 rx_used-- ; 674 rx_used-- ;
675 } 675 }
676 return(phys) ; 676 return phys;
677} 677}
678 678
679 679
@@ -1595,7 +1595,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
1595 } 1595 }
1596 DB_TX("frame_status = %x",frame_status,0,3) ; 1596 DB_TX("frame_status = %x",frame_status,0,3) ;
1597 NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ; 1597 NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
1598 return(frame_status) ; 1598 return frame_status;
1599} 1599}
1600 1600
1601/* 1601/*
@@ -1764,7 +1764,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
1764 smc->os.hwm.llc_rx_pipe = mb->sm_next ; 1764 smc->os.hwm.llc_rx_pipe = mb->sm_next ;
1765 } 1765 }
1766 DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ; 1766 DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
1767 return(mb) ; 1767 return mb;
1768} 1768}
1769 1769
1770/* 1770/*
@@ -1797,7 +1797,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc)
1797 smc->os.hwm.txd_tx_pipe = mb->sm_next ; 1797 smc->os.hwm.txd_tx_pipe = mb->sm_next ;
1798 } 1798 }
1799 DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ; 1799 DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
1800 return(mb) ; 1800 return mb;
1801} 1801}
1802 1802
1803/* 1803/*
diff --git a/drivers/net/skfp/hwt.c b/drivers/net/skfp/hwt.c
index 053151468f93..e6baa53307c7 100644
--- a/drivers/net/skfp/hwt.c
+++ b/drivers/net/skfp/hwt.c
@@ -179,7 +179,7 @@ u_long hwt_read(struct s_smc *smc)
179 else 179 else
180 smc->hw.t_stop = smc->hw.t_start - tr ; 180 smc->hw.t_stop = smc->hw.t_start - tr ;
181 } 181 }
182 return (smc->hw.t_stop) ; 182 return smc->hw.t_stop;
183} 183}
184 184
185#ifdef PCI 185#ifdef PCI
@@ -208,7 +208,7 @@ u_long hwt_quick_read(struct s_smc *smc)
208 outpw(ADDR(B2_TI_CRTL), TIM_START) ; 208 outpw(ADDR(B2_TI_CRTL), TIM_START) ;
209 outpd(ADDR(B2_TI_INI),interval) ; 209 outpd(ADDR(B2_TI_INI),interval) ;
210 210
211 return(time) ; 211 return time;
212} 212}
213 213
214/************************ 214/************************
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index ba45bc794d77..112d35b1bf0e 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -504,7 +504,7 @@ int sm_pm_get_ls(struct s_smc *smc, int phy)
504 504
505#ifdef CONCENTRATOR 505#ifdef CONCENTRATOR
506 if (!plc_is_installed(smc,phy)) 506 if (!plc_is_installed(smc,phy))
507 return(PC_QLS) ; 507 return PC_QLS;
508#endif 508#endif
509 509
510 state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ; 510 state = inpw(PLC(phy,PL_STATUS_A)) & PL_LINE_ST ;
@@ -528,7 +528,7 @@ int sm_pm_get_ls(struct s_smc *smc, int phy)
528 default : 528 default :
529 state = PC_LS_NONE ; 529 state = PC_LS_NONE ;
530 } 530 }
531 return(state) ; 531 return state;
532} 532}
533 533
534static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len) 534static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
@@ -547,7 +547,7 @@ static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
547#if 0 547#if 0
548 printf("PL_PCM_SIGNAL is set\n") ; 548 printf("PL_PCM_SIGNAL is set\n") ;
549#endif 549#endif
550 return(1) ; 550 return 1;
551 } 551 }
552 /* write bit[n] & length = 1 to regs */ 552 /* write bit[n] & length = 1 to regs */
553 outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */ 553 outpw(PLC(np,PL_VECTOR_LEN),len-1) ; /* len=nr-1 */
@@ -562,7 +562,7 @@ static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
562 printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ; 562 printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
563#endif 563#endif
564#endif 564#endif
565 return(0) ; 565 return 0;
566} 566}
567 567
568/* 568/*
@@ -1590,12 +1590,12 @@ int pcm_status_twisted(struct s_smc *smc)
1590{ 1590{
1591 int twist = 0 ; 1591 int twist = 0 ;
1592 if (smc->s.sas != SMT_DAS) 1592 if (smc->s.sas != SMT_DAS)
1593 return(0) ; 1593 return 0;
1594 if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE)) 1594 if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
1595 twist |= 1 ; 1595 twist |= 1 ;
1596 if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE)) 1596 if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
1597 twist |= 2 ; 1597 twist |= 2 ;
1598 return(twist) ; 1598 return twist;
1599} 1599}
1600 1600
1601/* 1601/*
@@ -1636,9 +1636,9 @@ int pcm_rooted_station(struct s_smc *smc)
1636 for (n = 0 ; n < NUMPHYS ; n++) { 1636 for (n = 0 ; n < NUMPHYS ; n++) {
1637 if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE && 1637 if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
1638 smc->y[n].mib->fddiPORTNeighborType == TM) 1638 smc->y[n].mib->fddiPORTNeighborType == TM)
1639 return(0) ; 1639 return 0;
1640 } 1640 }
1641 return(1) ; 1641 return 1;
1642} 1642}
1643 1643
1644/* 1644/*
@@ -1915,7 +1915,7 @@ int get_pcm_state(struct s_smc *smc, int np)
1915 case PL_PC9 : pcs = PC_MAINT ; break ; 1915 case PL_PC9 : pcs = PC_MAINT ; break ;
1916 default : pcs = PC_DISABLE ; break ; 1916 default : pcs = PC_DISABLE ; break ;
1917 } 1917 }
1918 return(pcs) ; 1918 return pcs;
1919} 1919}
1920 1920
1921char *get_linestate(struct s_smc *smc, int np) 1921char *get_linestate(struct s_smc *smc, int np)
@@ -1937,7 +1937,7 @@ char *get_linestate(struct s_smc *smc, int np)
1937 default: ls = "unknown" ; break ; 1937 default: ls = "unknown" ; break ;
1938#endif 1938#endif
1939 } 1939 }
1940 return(ls) ; 1940 return ls;
1941} 1941}
1942 1942
1943char *get_pcmstate(struct s_smc *smc, int np) 1943char *get_pcmstate(struct s_smc *smc, int np)
@@ -1959,7 +1959,7 @@ char *get_pcmstate(struct s_smc *smc, int np)
1959 case PL_PC9 : pcs = "MAINT" ; break ; 1959 case PL_PC9 : pcs = "MAINT" ; break ;
1960 default : pcs = "UNKNOWN" ; break ; 1960 default : pcs = "UNKNOWN" ; break ;
1961 } 1961 }
1962 return(pcs) ; 1962 return pcs;
1963} 1963}
1964 1964
1965void list_phy(struct s_smc *smc) 1965void list_phy(struct s_smc *smc)
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index a320fdb3727d..9ac4665d7411 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -328,7 +328,7 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
328 * build SMT header 328 * build SMT header
329 */ 329 */
330 if (!(mb = smt_get_mbuf(smc))) 330 if (!(mb = smt_get_mbuf(smc)))
331 return(mb) ; 331 return mb;
332 332
333 smt = smtod(mb, struct smt_header *) ; 333 smt = smtod(mb, struct smt_header *) ;
334 smt->smt_dest = req->smt_source ; /* DA == source of request */ 334 smt->smt_dest = req->smt_source ; /* DA == source of request */
@@ -493,7 +493,7 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
493 smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ; 493 smt_add_para(smc,&set_pcon,(u_short) SMT_P1035,0,0) ;
494 smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ; 494 smt_add_para(smc,&set_pcon,(u_short) SMT_P1036,0,0) ;
495 } 495 }
496 return(mb) ; 496 return mb;
497} 497}
498 498
499static int smt_authorize(struct s_smc *smc, struct smt_header *sm) 499static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
@@ -511,7 +511,7 @@ static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
511 if (i != 8) { 511 if (i != 8) {
512 if (memcmp((char *) &sm->smt_sid, 512 if (memcmp((char *) &sm->smt_sid,
513 (char *) &smc->mib.fddiPRPMFStation,8)) 513 (char *) &smc->mib.fddiPRPMFStation,8))
514 return(1) ; 514 return 1;
515 } 515 }
516 /* 516 /*
517 * check authoriziation parameter if passwd not zero 517 * check authoriziation parameter if passwd not zero
@@ -522,13 +522,13 @@ static int smt_authorize(struct s_smc *smc, struct smt_header *sm)
522 if (i != 8) { 522 if (i != 8) {
523 pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ; 523 pa = (struct smt_para *) sm_to_para(smc,sm,SMT_P_AUTHOR) ;
524 if (!pa) 524 if (!pa)
525 return(1) ; 525 return 1;
526 if (pa->p_len != 8) 526 if (pa->p_len != 8)
527 return(1) ; 527 return 1;
528 if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8)) 528 if (memcmp((char *)(pa+1),(char *)smc->mib.fddiPRPMFPasswd,8))
529 return(1) ; 529 return 1;
530 } 530 }
531 return(0) ; 531 return 0;
532} 532}
533 533
534static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm) 534static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
@@ -542,9 +542,9 @@ static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm)
542 if ((smc->mib.fddiSMTSetCount.count != sc->count) || 542 if ((smc->mib.fddiSMTSetCount.count != sc->count) ||
543 memcmp((char *) smc->mib.fddiSMTSetCount.timestamp, 543 memcmp((char *) smc->mib.fddiSMTSetCount.timestamp,
544 (char *)sc->timestamp,8)) 544 (char *)sc->timestamp,8))
545 return(1) ; 545 return 1;
546 } 546 }
547 return(0) ; 547 return 0;
548} 548}
549 549
550void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, 550void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
@@ -1109,7 +1109,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1109 break ; 1109 break ;
1110 case 0x2000 : 1110 case 0x2000 :
1111 if (mac < 0 || mac >= NUMMACS) { 1111 if (mac < 0 || mac >= NUMMACS) {
1112 return(SMT_RDF_NOPARAM) ; 1112 return SMT_RDF_NOPARAM;
1113 } 1113 }
1114 mib_m = &smc->mib.m[mac] ; 1114 mib_m = &smc->mib.m[mac] ;
1115 mib_addr = (char *) mib_m ; 1115 mib_addr = (char *) mib_m ;
@@ -1118,7 +1118,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1118 break ; 1118 break ;
1119 case 0x3000 : 1119 case 0x3000 :
1120 if (path < 0 || path >= NUMPATHS) { 1120 if (path < 0 || path >= NUMPATHS) {
1121 return(SMT_RDF_NOPARAM) ; 1121 return SMT_RDF_NOPARAM;
1122 } 1122 }
1123 mib_a = &smc->mib.a[path] ; 1123 mib_a = &smc->mib.a[path] ;
1124 mib_addr = (char *) mib_a ; 1124 mib_addr = (char *) mib_a ;
@@ -1127,7 +1127,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1127 break ; 1127 break ;
1128 case 0x4000 : 1128 case 0x4000 :
1129 if (port < 0 || port >= smt_mib_phys(smc)) { 1129 if (port < 0 || port >= smt_mib_phys(smc)) {
1130 return(SMT_RDF_NOPARAM) ; 1130 return SMT_RDF_NOPARAM;
1131 } 1131 }
1132 mib_p = &smc->mib.p[port_to_mib(smc,port)] ; 1132 mib_p = &smc->mib.p[port_to_mib(smc,port)] ;
1133 mib_addr = (char *) mib_p ; 1133 mib_addr = (char *) mib_p ;
@@ -1151,22 +1151,20 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1151 case SMT_P10F9 : 1151 case SMT_P10F9 :
1152#endif 1152#endif
1153 case SMT_P20F1 : 1153 case SMT_P20F1 :
1154 if (!local) { 1154 if (!local)
1155 return(SMT_RDF_NOPARAM) ; 1155 return SMT_RDF_NOPARAM;
1156 }
1157 break ; 1156 break ;
1158 } 1157 }
1159 pt = smt_get_ptab(pa->p_type) ; 1158 pt = smt_get_ptab(pa->p_type) ;
1160 if (!pt) { 1159 if (!pt)
1161 return( (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM : 1160 return (pa->p_type & 0xff00) ? SMT_RDF_NOPARAM :
1162 SMT_RDF_ILLEGAL ) ; 1161 SMT_RDF_ILLEGAL;
1163 }
1164 switch (pt->p_access) { 1162 switch (pt->p_access) {
1165 case AC_GR : 1163 case AC_GR :
1166 case AC_S : 1164 case AC_S :
1167 break ; 1165 break ;
1168 default : 1166 default :
1169 return(SMT_RDF_ILLEGAL) ; 1167 return SMT_RDF_ILLEGAL;
1170 } 1168 }
1171 to = mib_addr + pt->p_offset ; 1169 to = mib_addr + pt->p_offset ;
1172 swap = pt->p_swap ; /* pointer to swap string */ 1170 swap = pt->p_swap ; /* pointer to swap string */
@@ -1292,7 +1290,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
1292 break ; 1290 break ;
1293 default : 1291 default :
1294 SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ; 1292 SMT_PANIC(smc,SMT_E0120, SMT_E0120_MSG) ;
1295 return(SMT_RDF_ILLEGAL) ; 1293 return SMT_RDF_ILLEGAL;
1296 } 1294 }
1297 } 1295 }
1298 /* 1296 /*
@@ -1501,15 +1499,15 @@ change_mac_para:
1501 default : 1499 default :
1502 break ; 1500 break ;
1503 } 1501 }
1504 return(0) ; 1502 return 0;
1505 1503
1506val_error: 1504val_error:
1507 /* parameter value in frame is out of range */ 1505 /* parameter value in frame is out of range */
1508 return(SMT_RDF_RANGE) ; 1506 return SMT_RDF_RANGE;
1509 1507
1510len_error: 1508len_error:
1511 /* parameter value in frame is too short */ 1509 /* parameter value in frame is too short */
1512 return(SMT_RDF_LENGTH) ; 1510 return SMT_RDF_LENGTH;
1513 1511
1514#if 0 1512#if 0
1515no_author_error: 1513no_author_error:
@@ -1518,7 +1516,7 @@ no_author_error:
1518 * because SBA denied is not a valid return code in the 1516 * because SBA denied is not a valid return code in the
1519 * PMF protocol. 1517 * PMF protocol.
1520 */ 1518 */
1521 return(SMT_RDF_AUTHOR) ; 1519 return SMT_RDF_AUTHOR;
1522#endif 1520#endif
1523} 1521}
1524 1522
@@ -1527,7 +1525,7 @@ static const struct s_p_tab *smt_get_ptab(u_short para)
1527 const struct s_p_tab *pt ; 1525 const struct s_p_tab *pt ;
1528 for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++) 1526 for (pt = p_tab ; pt->p_num && pt->p_num != para ; pt++)
1529 ; 1527 ;
1530 return(pt->p_num ? pt : NULL) ; 1528 return pt->p_num ? pt : NULL;
1531} 1529}
1532 1530
1533static int smt_mib_phys(struct s_smc *smc) 1531static int smt_mib_phys(struct s_smc *smc)
@@ -1535,11 +1533,11 @@ static int smt_mib_phys(struct s_smc *smc)
1535#ifdef CONCENTRATOR 1533#ifdef CONCENTRATOR
1536 SK_UNUSED(smc) ; 1534 SK_UNUSED(smc) ;
1537 1535
1538 return(NUMPHYS) ; 1536 return NUMPHYS;
1539#else 1537#else
1540 if (smc->s.sas == SMT_SAS) 1538 if (smc->s.sas == SMT_SAS)
1541 return(1) ; 1539 return 1;
1542 return(NUMPHYS) ; 1540 return NUMPHYS;
1543#endif 1541#endif
1544} 1542}
1545 1543
@@ -1548,11 +1546,11 @@ static int port_to_mib(struct s_smc *smc, int p)
1548#ifdef CONCENTRATOR 1546#ifdef CONCENTRATOR
1549 SK_UNUSED(smc) ; 1547 SK_UNUSED(smc) ;
1550 1548
1551 return(p) ; 1549 return p;
1552#else 1550#else
1553 if (smc->s.sas == SMT_SAS) 1551 if (smc->s.sas == SMT_SAS)
1554 return(PS) ; 1552 return PS;
1555 return(p) ; 1553 return p;
1556#endif 1554#endif
1557} 1555}
1558 1556
diff --git a/drivers/net/skfp/queue.c b/drivers/net/skfp/queue.c
index 09adb3d68b7c..c1a0df455a59 100644
--- a/drivers/net/skfp/queue.c
+++ b/drivers/net/skfp/queue.c
@@ -128,7 +128,7 @@ u_short smt_online(struct s_smc *smc, int on)
128{ 128{
129 queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ; 129 queue_event(smc,EVENT_ECM,on ? EC_CONNECT : EC_DISCONNECT) ;
130 ev_dispatcher(smc) ; 130 ev_dispatcher(smc) ;
131 return(smc->mib.fddiSMTCF_State) ; 131 return smc->mib.fddiSMTCF_State;
132} 132}
133 133
134/* 134/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 8a12bd9d28ba..ba2e8339fe90 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -440,7 +440,7 @@ static int skfp_driver_init(struct net_device *dev)
440 440
441 smt_reset_defaults(smc, 0); 441 smt_reset_defaults(smc, 0);
442 442
443 return (0); 443 return 0;
444 444
445fail: 445fail:
446 if (bp->SharedMemAddr) { 446 if (bp->SharedMemAddr) {
@@ -516,7 +516,7 @@ static int skfp_open(struct net_device *dev)
516 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC); 516 mac_drv_rx_mode(smc, RX_DISABLE_PROMISC);
517 517
518 netif_start_queue(dev); 518 netif_start_queue(dev);
519 return (0); 519 return 0;
520} // skfp_open 520} // skfp_open
521 521
522 522
@@ -565,7 +565,7 @@ static int skfp_close(struct net_device *dev)
565 skb_queue_purge(&bp->SendSkbQueue); 565 skb_queue_purge(&bp->SendSkbQueue);
566 bp->QueueSkb = MAX_TX_QUEUE_LEN; 566 bp->QueueSkb = MAX_TX_QUEUE_LEN;
567 567
568 return (0); 568 return 0;
569} // skfp_close 569} // skfp_close
570 570
571 571
@@ -794,7 +794,7 @@ static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
794 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; 794 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls;
795 795
796#endif 796#endif
797 return ((struct net_device_stats *) &bp->os.MacStat); 797 return (struct net_device_stats *)&bp->os.MacStat;
798} // ctl_get_stat 798} // ctl_get_stat
799 799
800 800
@@ -932,7 +932,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
932 ResetAdapter(smc); 932 ResetAdapter(smc);
933 spin_unlock_irqrestore(&bp->DriverLock, Flags); 933 spin_unlock_irqrestore(&bp->DriverLock, Flags);
934 934
935 return (0); /* always return zero */ 935 return 0; /* always return zero */
936} // skfp_ctl_set_mac_address 936} // skfp_ctl_set_mac_address
937 937
938 938
@@ -1313,7 +1313,7 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1313 1313
1314 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) { 1314 if ((smc->os.SharedMemHeap + size) > smc->os.SharedMemSize) {
1315 printk("Unexpected SMT memory size requested: %d\n", size); 1315 printk("Unexpected SMT memory size requested: %d\n", size);
1316 return (NULL); 1316 return NULL;
1317 } 1317 }
1318 smc->os.SharedMemHeap += size; // Move heap pointer. 1318 smc->os.SharedMemHeap += size; // Move heap pointer.
1319 1319
@@ -1322,7 +1322,7 @@ void *mac_drv_get_space(struct s_smc *smc, unsigned int size)
1322 pr_debug("bus addr: %lx\n", (ulong) 1322 pr_debug("bus addr: %lx\n", (ulong)
1323 (smc->os.SharedMemDMA + 1323 (smc->os.SharedMemDMA +
1324 ((char *) virt - (char *)smc->os.SharedMemAddr))); 1324 ((char *) virt - (char *)smc->os.SharedMemAddr)));
1325 return (virt); 1325 return virt;
1326} // mac_drv_get_space 1326} // mac_drv_get_space
1327 1327
1328 1328
@@ -1363,9 +1363,9 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1363 1363
1364 if (!mac_drv_get_space(smc, size)) { 1364 if (!mac_drv_get_space(smc, size)) {
1365 printk("fddi: Unable to align descriptor memory.\n"); 1365 printk("fddi: Unable to align descriptor memory.\n");
1366 return (NULL); 1366 return NULL;
1367 } 1367 }
1368 return (virt + size); 1368 return virt + size;
1369} // mac_drv_get_desc_mem 1369} // mac_drv_get_desc_mem
1370 1370
1371 1371
@@ -1384,8 +1384,8 @@ void *mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size)
1384 ************************/ 1384 ************************/
1385unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt) 1385unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1386{ 1386{
1387 return (smc->os.SharedMemDMA + 1387 return smc->os.SharedMemDMA +
1388 ((char *) virt - (char *)smc->os.SharedMemAddr)); 1388 ((char *) virt - (char *)smc->os.SharedMemAddr);
1389} // mac_drv_virt2phys 1389} // mac_drv_virt2phys
1390 1390
1391 1391
@@ -1419,8 +1419,8 @@ unsigned long mac_drv_virt2phys(struct s_smc *smc, void *virt)
1419 ************************/ 1419 ************************/
1420u_long dma_master(struct s_smc * smc, void *virt, int len, int flag) 1420u_long dma_master(struct s_smc * smc, void *virt, int len, int flag)
1421{ 1421{
1422 return (smc->os.SharedMemDMA + 1422 return smc->os.SharedMemDMA +
1423 ((char *) virt - (char *)smc->os.SharedMemAddr)); 1423 ((char *) virt - (char *)smc->os.SharedMemAddr);
1424} // dma_master 1424} // dma_master
1425 1425
1426 1426
@@ -1904,12 +1904,12 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1904 pr_debug("fddi: Discard invalid local SMT frame\n"); 1904 pr_debug("fddi: Discard invalid local SMT frame\n");
1905 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n", 1905 pr_debug(" len=%d, la_len=%d, (ULONG) look_ahead=%08lXh.\n",
1906 len, la_len, (unsigned long) look_ahead); 1906 len, la_len, (unsigned long) look_ahead);
1907 return (0); 1907 return 0;
1908 } 1908 }
1909 skb = alloc_skb(len + 3, GFP_ATOMIC); 1909 skb = alloc_skb(len + 3, GFP_ATOMIC);
1910 if (!skb) { 1910 if (!skb) {
1911 pr_debug("fddi: Local SMT: skb memory exhausted.\n"); 1911 pr_debug("fddi: Local SMT: skb memory exhausted.\n");
1912 return (0); 1912 return 0;
1913 } 1913 }
1914 skb_reserve(skb, 3); 1914 skb_reserve(skb, 3);
1915 skb_put(skb, len); 1915 skb_put(skb, len);
@@ -1919,7 +1919,7 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1919 skb->protocol = fddi_type_trans(skb, smc->os.dev); 1919 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1920 netif_rx(skb); 1920 netif_rx(skb);
1921 1921
1922 return (0); 1922 return 0;
1923} // mac_drv_rx_init 1923} // mac_drv_rx_init
1924 1924
1925 1925
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 6f35bb77595f..2d9941c045bc 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -127,22 +127,22 @@ static inline int is_my_addr(const struct s_smc *smc,
127 127
128static inline int is_broadcast(const struct fddi_addr *addr) 128static inline int is_broadcast(const struct fddi_addr *addr)
129{ 129{
130 return(*(u_short *)(&addr->a[0]) == 0xffff && 130 return *(u_short *)(&addr->a[0]) == 0xffff &&
131 *(u_short *)(&addr->a[2]) == 0xffff && 131 *(u_short *)(&addr->a[2]) == 0xffff &&
132 *(u_short *)(&addr->a[4]) == 0xffff ) ; 132 *(u_short *)(&addr->a[4]) == 0xffff;
133} 133}
134 134
135static inline int is_individual(const struct fddi_addr *addr) 135static inline int is_individual(const struct fddi_addr *addr)
136{ 136{
137 return(!(addr->a[0] & GROUP_ADDR)) ; 137 return !(addr->a[0] & GROUP_ADDR);
138} 138}
139 139
140static inline int is_equal(const struct fddi_addr *addr1, 140static inline int is_equal(const struct fddi_addr *addr1,
141 const struct fddi_addr *addr2) 141 const struct fddi_addr *addr2)
142{ 142{
143 return(*(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) && 143 return *(u_short *)(&addr1->a[0]) == *(u_short *)(&addr2->a[0]) &&
144 *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) && 144 *(u_short *)(&addr1->a[2]) == *(u_short *)(&addr2->a[2]) &&
145 *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]) ) ; 145 *(u_short *)(&addr1->a[4]) == *(u_short *)(&addr2->a[4]);
146} 146}
147 147
148/* 148/*
@@ -457,8 +457,8 @@ static int div_ratio(u_long upper, u_long lower)
457 else 457 else
458 upper <<= 16L ; 458 upper <<= 16L ;
459 if (!lower) 459 if (!lower)
460 return(0) ; 460 return 0;
461 return((int)(upper/lower)) ; 461 return (int)(upper/lower) ;
462} 462}
463 463
464#ifndef SLIM_SMT 464#ifndef SLIM_SMT
@@ -1111,11 +1111,11 @@ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
1111 1111
1112#if 0 1112#if 0
1113 if (!smc->r.sm_ma_avail) { 1113 if (!smc->r.sm_ma_avail) {
1114 return(0) ; 1114 return 0;
1115 } 1115 }
1116#endif 1116#endif
1117 if (!(mb = smt_get_mbuf(smc))) 1117 if (!(mb = smt_get_mbuf(smc)))
1118 return(mb) ; 1118 return mb;
1119 1119
1120 mb->sm_len = length ; 1120 mb->sm_len = length ;
1121 smt = smtod(mb, struct smt_header *) ; 1121 smt = smtod(mb, struct smt_header *) ;
@@ -1136,7 +1136,7 @@ SMbuf *smt_build_frame(struct s_smc *smc, int class, int type,
1136 smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */ 1136 smt->smt_tid = smt_get_tid(smc) ; /* set transaction ID */
1137 smt->smt_pad = 0 ; 1137 smt->smt_pad = 0 ;
1138 smt->smt_len = length - sizeof(struct smt_header) ; 1138 smt->smt_len = length - sizeof(struct smt_header) ;
1139 return(mb) ; 1139 return mb;
1140} 1140}
1141 1141
1142static void smt_add_frame_len(SMbuf *mb, int len) 1142static void smt_add_frame_len(SMbuf *mb, int len)
@@ -1375,7 +1375,7 @@ static int smt_fill_path(struct s_smc *smc, struct smt_p_path *path)
1375 pd_mac = (struct smt_mac_rec *) phy ; 1375 pd_mac = (struct smt_mac_rec *) phy ;
1376 pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ; 1376 pd_mac->mac_addr = smc->mib.m[MAC0].fddiMACSMTAddress ;
1377 pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ; 1377 pd_mac->mac_resource_idx = mac_con_resource_index(smc,1) ;
1378 return(len) ; 1378 return len;
1379} 1379}
1380 1380
1381/* 1381/*
@@ -1563,7 +1563,7 @@ u_long smt_get_tid(struct s_smc *smc)
1563 u_long tid ; 1563 u_long tid ;
1564 while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0) 1564 while ((tid = ++(smc->sm.smt_tid) ^ SMT_TID_MAGIC) == 0)
1565 ; 1565 ;
1566 return(tid & 0x3fffffffL) ; 1566 return tid & 0x3fffffffL;
1567} 1567}
1568 1568
1569 1569
@@ -1654,11 +1654,11 @@ int smt_check_para(struct s_smc *smc, struct smt_header *sm,
1654 while (*p) { 1654 while (*p) {
1655 if (!sm_to_para(smc,sm,(int) *p)) { 1655 if (!sm_to_para(smc,sm,(int) *p)) {
1656 DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0); 1656 DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0);
1657 return(-1) ; 1657 return -1;
1658 } 1658 }
1659 p++ ; 1659 p++ ;
1660 } 1660 }
1661 return(0) ; 1661 return 0;
1662} 1662}
1663 1663
1664void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para) 1664void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
@@ -1687,7 +1687,7 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
1687 return NULL; 1687 return NULL;
1688 } 1688 }
1689 if (found) 1689 if (found)
1690 return(found) ; 1690 return found;
1691 } 1691 }
1692 return NULL; 1692 return NULL;
1693} 1693}
@@ -1732,7 +1732,7 @@ char *addr_to_string(struct fddi_addr *addr)
1732 string[i * 3 + 2] = ':'; 1732 string[i * 3 + 2] = ':';
1733 } 1733 }
1734 string[5 * 3 + 2] = 0; 1734 string[5 * 3 + 2] = 0;
1735 return(string); 1735 return string;
1736} 1736}
1737#endif 1737#endif
1738 1738
@@ -1742,9 +1742,9 @@ int smt_ifconfig(int argc, char *argv[])
1742 if (argc >= 2 && !strcmp(argv[0],"opt_bypass") && 1742 if (argc >= 2 && !strcmp(argv[0],"opt_bypass") &&
1743 !strcmp(argv[1],"yes")) { 1743 !strcmp(argv[1],"yes")) {
1744 smc->mib.fddiSMTBypassPresent = 1 ; 1744 smc->mib.fddiSMTBypassPresent = 1 ;
1745 return(0) ; 1745 return 0;
1746 } 1746 }
1747 return(amdfddi_config(0,argc,argv)) ; 1747 return amdfddi_config(0, argc, argv);
1748} 1748}
1749#endif 1749#endif
1750 1750
@@ -1756,9 +1756,9 @@ static int mac_index(struct s_smc *smc, int mac)
1756 SK_UNUSED(mac) ; 1756 SK_UNUSED(mac) ;
1757#ifdef CONCENTRATOR 1757#ifdef CONCENTRATOR
1758 SK_UNUSED(smc) ; 1758 SK_UNUSED(smc) ;
1759 return(NUMPHYS+1) ; 1759 return NUMPHYS + 1;
1760#else 1760#else
1761 return((smc->s.sas == SMT_SAS) ? 2 : 3) ; 1761 return (smc->s.sas == SMT_SAS) ? 2 : 3;
1762#endif 1762#endif
1763} 1763}
1764 1764
@@ -1768,7 +1768,7 @@ static int mac_index(struct s_smc *smc, int mac)
1768static int phy_index(struct s_smc *smc, int phy) 1768static int phy_index(struct s_smc *smc, int phy)
1769{ 1769{
1770 SK_UNUSED(smc) ; 1770 SK_UNUSED(smc) ;
1771 return(phy+1); 1771 return phy + 1;
1772} 1772}
1773 1773
1774/* 1774/*
@@ -1779,19 +1779,19 @@ static int mac_con_resource_index(struct s_smc *smc, int mac)
1779#ifdef CONCENTRATOR 1779#ifdef CONCENTRATOR
1780 SK_UNUSED(smc) ; 1780 SK_UNUSED(smc) ;
1781 SK_UNUSED(mac) ; 1781 SK_UNUSED(mac) ;
1782 return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_MAC))) ; 1782 return entity_to_index(smc, cem_get_downstream(smc, ENTITY_MAC));
1783#else 1783#else
1784 SK_UNUSED(mac) ; 1784 SK_UNUSED(mac) ;
1785 switch (smc->mib.fddiSMTCF_State) { 1785 switch (smc->mib.fddiSMTCF_State) {
1786 case SC9_C_WRAP_A : 1786 case SC9_C_WRAP_A :
1787 case SC5_THRU_B : 1787 case SC5_THRU_B :
1788 case SC11_C_WRAP_S : 1788 case SC11_C_WRAP_S :
1789 return(1) ; 1789 return 1;
1790 case SC10_C_WRAP_B : 1790 case SC10_C_WRAP_B :
1791 case SC4_THRU_A : 1791 case SC4_THRU_A :
1792 return(2) ; 1792 return 2;
1793 } 1793 }
1794 return(smc->s.sas == SMT_SAS ? 2 : 3) ; 1794 return smc->s.sas == SMT_SAS ? 2 : 3;
1795#endif 1795#endif
1796} 1796}
1797 1797
@@ -1801,21 +1801,21 @@ static int mac_con_resource_index(struct s_smc *smc, int mac)
1801static int phy_con_resource_index(struct s_smc *smc, int phy) 1801static int phy_con_resource_index(struct s_smc *smc, int phy)
1802{ 1802{
1803#ifdef CONCENTRATOR 1803#ifdef CONCENTRATOR
1804 return(entity_to_index(smc,cem_get_downstream(smc,ENTITY_PHY(phy)))) ; 1804 return entity_to_index(smc, cem_get_downstream(smc, ENTITY_PHY(phy))) ;
1805#else 1805#else
1806 switch (smc->mib.fddiSMTCF_State) { 1806 switch (smc->mib.fddiSMTCF_State) {
1807 case SC9_C_WRAP_A : 1807 case SC9_C_WRAP_A :
1808 return(phy == PA ? 3 : 2) ; 1808 return phy == PA ? 3 : 2;
1809 case SC10_C_WRAP_B : 1809 case SC10_C_WRAP_B :
1810 return(phy == PA ? 1 : 3) ; 1810 return phy == PA ? 1 : 3;
1811 case SC4_THRU_A : 1811 case SC4_THRU_A :
1812 return(phy == PA ? 3 : 1) ; 1812 return phy == PA ? 3 : 1;
1813 case SC5_THRU_B : 1813 case SC5_THRU_B :
1814 return(phy == PA ? 2 : 3) ; 1814 return phy == PA ? 2 : 3;
1815 case SC11_C_WRAP_S : 1815 case SC11_C_WRAP_S :
1816 return(2) ; 1816 return 2;
1817 } 1817 }
1818 return(phy) ; 1818 return phy;
1819#endif 1819#endif
1820} 1820}
1821 1821
@@ -1823,16 +1823,16 @@ static int phy_con_resource_index(struct s_smc *smc, int phy)
1823static int entity_to_index(struct s_smc *smc, int e) 1823static int entity_to_index(struct s_smc *smc, int e)
1824{ 1824{
1825 if (e == ENTITY_MAC) 1825 if (e == ENTITY_MAC)
1826 return(mac_index(smc,1)) ; 1826 return mac_index(smc, 1);
1827 else 1827 else
1828 return(phy_index(smc,e - ENTITY_PHY(0))) ; 1828 return phy_index(smc, e - ENTITY_PHY(0));
1829} 1829}
1830#endif 1830#endif
1831 1831
1832#ifdef LITTLE_ENDIAN 1832#ifdef LITTLE_ENDIAN
1833static int smt_swap_short(u_short s) 1833static int smt_swap_short(u_short s)
1834{ 1834{
1835 return(((s>>8)&0xff)|((s&0xff)<<8)) ; 1835 return ((s>>8)&0xff) | ((s&0xff)<<8);
1836} 1836}
1837 1837
1838void smt_swap_para(struct smt_header *sm, int len, int direction) 1838void smt_swap_para(struct smt_header *sm, int len, int direction)
@@ -1996,7 +1996,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
1996 } 1996 }
1997 break ; 1997 break ;
1998 default : 1998 default :
1999 return(1) ; 1999 return 1;
2000 } 2000 }
2001 break ; 2001 break ;
2002 case SMT_PORT_ACTION : 2002 case SMT_PORT_ACTION :
@@ -2017,14 +2017,14 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
2017 event = PC_STOP ; 2017 event = PC_STOP ;
2018 break ; 2018 break ;
2019 default : 2019 default :
2020 return(1) ; 2020 return 1;
2021 } 2021 }
2022 queue_event(smc,EVENT_PCM+index,event) ; 2022 queue_event(smc,EVENT_PCM+index,event) ;
2023 break ; 2023 break ;
2024 default : 2024 default :
2025 return(1) ; 2025 return 1;
2026 } 2026 }
2027 return(0) ; 2027 return 0;
2028} 2028}
2029 2029
2030/* 2030/*
diff --git a/drivers/net/skfp/smtdef.c b/drivers/net/skfp/smtdef.c
index 4e07ff7073f1..1acab0b368e3 100644
--- a/drivers/net/skfp/smtdef.c
+++ b/drivers/net/skfp/smtdef.c
@@ -303,7 +303,7 @@ int smt_set_mac_opvalues(struct s_smc *smc)
303 FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ, 303 FDDI_SMT_EVENT, (u_long) FDDI_REMOTE_T_REQ,
304 smt_get_event_word(smc)); 304 smt_get_event_word(smc));
305 } 305 }
306 return(st) ; 306 return st;
307} 307}
308 308
309void smt_fixup_mib(struct s_smc *smc) 309void smt_fixup_mib(struct s_smc *smc)
@@ -350,6 +350,6 @@ static int set_min_max(int maxflag, u_long mib, u_long limit, u_long *oper)
350 *oper = limit ; 350 *oper = limit ;
351 else 351 else
352 *oper = mib ; 352 *oper = mib ;
353 return(old != *oper) ; 353 return old != *oper;
354} 354}
355 355
diff --git a/drivers/net/skfp/smtinit.c b/drivers/net/skfp/smtinit.c
index 3c8964ce1837..e3a0c0bc2233 100644
--- a/drivers/net/skfp/smtinit.c
+++ b/drivers/net/skfp/smtinit.c
@@ -120,6 +120,6 @@ int init_smt(struct s_smc *smc, u_char *mac_addr)
120 120
121 PNMI_INIT(smc) ; /* PNMI initialization */ 121 PNMI_INIT(smc) ; /* PNMI initialization */
122 122
123 return(0) ; 123 return 0;
124} 124}
125 125
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 40882b3faba6..f6f7baf9f27a 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -165,7 +165,7 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
165 165
166 for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) { 166 for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
167 if (evc->evc_code == code && evc->evc_index == index) 167 if (evc->evc_code == code && evc->evc_index == index)
168 return(evc) ; 168 return evc;
169 } 169 }
170 return NULL; 170 return NULL;
171} 171}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a8a63581d63d..bfec2e0f5275 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,6 +43,7 @@
43#include <linux/seq_file.h> 43#include <linux/seq_file.h>
44#include <linux/mii.h> 44#include <linux/mii.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/dmi.h>
46#include <asm/irq.h> 47#include <asm/irq.h>
47 48
48#include "skge.h" 49#include "skge.h"
@@ -3869,6 +3870,8 @@ static void __devinit skge_show_addr(struct net_device *dev)
3869 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); 3870 netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
3870} 3871}
3871 3872
3873static int only_32bit_dma;
3874
3872static int __devinit skge_probe(struct pci_dev *pdev, 3875static int __devinit skge_probe(struct pci_dev *pdev,
3873 const struct pci_device_id *ent) 3876 const struct pci_device_id *ent)
3874{ 3877{
@@ -3890,7 +3893,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3890 3893
3891 pci_set_master(pdev); 3894 pci_set_master(pdev);
3892 3895
3893 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3896 if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3894 using_dac = 1; 3897 using_dac = 1;
3895 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3898 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3896 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3899 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
@@ -4148,8 +4151,21 @@ static struct pci_driver skge_driver = {
4148 .shutdown = skge_shutdown, 4151 .shutdown = skge_shutdown,
4149}; 4152};
4150 4153
4154static struct dmi_system_id skge_32bit_dma_boards[] = {
4155 {
4156 .ident = "Gigabyte nForce boards",
4157 .matches = {
4158 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
4159 DMI_MATCH(DMI_BOARD_NAME, "nForce"),
4160 },
4161 },
4162 {}
4163};
4164
4151static int __init skge_init_module(void) 4165static int __init skge_init_module(void)
4152{ 4166{
4167 if (dmi_check_system(skge_32bit_dma_boards))
4168 only_32bit_dma = 1;
4153 skge_debug_init(); 4169 skge_debug_init();
4154 return pci_register_driver(&skge_driver); 4170 return pci_register_driver(&skge_driver);
4155} 4171}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 194e5cf8c763..d6577084ce70 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1782,7 +1782,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1782 ctrl = 0; 1782 ctrl = 0;
1783#ifdef SKY2_VLAN_TAG_USED 1783#ifdef SKY2_VLAN_TAG_USED
1784 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1784 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1785 if (sky2->vlgrp && vlan_tx_tag_present(skb)) { 1785 if (vlan_tx_tag_present(skb)) {
1786 if (!le) { 1786 if (!le) {
1787 le = get_tx_le(sky2, &slot); 1787 le = get_tx_le(sky2, &slot);
1788 le->addr = 0; 1788 le->addr = 0;
@@ -4581,7 +4581,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4581 4581
4582 sky2->port = port; 4582 sky2->port = port;
4583 4583
4584 dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG; 4584 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
4585 | NETIF_F_TSO | NETIF_F_GRO;
4585 if (highmem) 4586 if (highmem)
4586 dev->features |= NETIF_F_HIGHDMA; 4587 dev->features |= NETIF_F_HIGHDMA;
4587 4588
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 38547a8938fe..86cbb9ea2f26 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -944,7 +944,7 @@ static int slip_esc(unsigned char *s, unsigned char *d, int len)
944 } 944 }
945 } 945 }
946 *ptr++ = END; 946 *ptr++ = END;
947 return (ptr - d); 947 return ptr - d;
948} 948}
949 949
950static void slip_unesc(struct slip *sl, unsigned char s) 950static void slip_unesc(struct slip *sl, unsigned char s)
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 13ddcd487200..a8e5856ce882 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
58 58
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_VERSION(SMSC_DRV_VERSION); 60MODULE_VERSION(SMSC_DRV_VERSION);
61MODULE_ALIAS("platform:smsc911x");
61 62
62#if USE_DEBUG > 0 63#if USE_DEBUG > 0
63static int debug = 16; 64static int debug = 16;
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 3c2af7c6a39b..7df7df4e79c5 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -3,7 +3,7 @@ config STMMAC_ETH
3 select MII 3 select MII
4 select PHYLIB 4 select PHYLIB
5 select CRC32 5 select CRC32
6 depends on NETDEVICES 6 depends on NETDEVICES && HAS_IOMEM
7 help 7 help
8 This is the driver for the Ethernet IPs are built around a 8 This is the driver for the Ethernet IPs are built around a
9 Synopsys IP Core and only tested on the STMicroelectronics 9 Synopsys IP Core and only tested on the STMicroelectronics
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e8cbcb5c206e..375ea193e139 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -102,8 +102,6 @@ struct stmmac_extra_stats {
102 102
103#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ 103#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
104 104
105#define HW_CSUM 1
106#define NO_HW_CSUM 0
107enum rx_frame_status { /* IPC status */ 105enum rx_frame_status { /* IPC status */
108 good_frame = 0, 106 good_frame = 0,
109 discard_frame = 1, 107 discard_frame = 1,
@@ -205,6 +203,8 @@ struct stmmac_dma_ops {
205struct stmmac_ops { 203struct stmmac_ops {
206 /* MAC core initialization */ 204 /* MAC core initialization */
207 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned; 205 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
206 /* Support checksum offload engine */
207 int (*rx_coe) (void __iomem *ioaddr);
208 /* Dump MAC registers */ 208 /* Dump MAC registers */
209 void (*dump_regs) (void __iomem *ioaddr); 209 void (*dump_regs) (void __iomem *ioaddr);
210 /* Handle extra events on specific interrupts hw dependent */ 210 /* Handle extra events on specific interrupts hw dependent */
@@ -235,10 +235,9 @@ struct mii_regs {
235}; 235};
236 236
237struct mac_device_info { 237struct mac_device_info {
238 struct stmmac_ops *mac; 238 const struct stmmac_ops *mac;
239 struct stmmac_desc_ops *desc; 239 const struct stmmac_desc_ops *desc;
240 struct stmmac_dma_ops *dma; 240 const struct stmmac_dma_ops *dma;
241 unsigned int pmt; /* support Power-Down */
242 struct mii_regs mii; /* MII register Addresses */ 241 struct mii_regs mii; /* MII register Addresses */
243 struct mac_link link; 242 struct mac_link link;
244}; 243};
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 97956cbf1cb4..7c6d857a9cc7 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -118,4 +118,4 @@ enum ttc_control {
118#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ 118#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ 119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */
120 120
121extern struct stmmac_dma_ops dwmac100_dma_ops; 121extern const struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 8b20b19971cb..cfcef0ea0fa5 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -99,7 +99,7 @@ enum inter_frame_gap {
99#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 99#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
100 100
101#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ 101#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
102 GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE) 102 GMAC_CONTROL_JE | GMAC_CONTROL_BE)
103 103
104/* GMAC Frame Filter defines */ 104/* GMAC Frame Filter defines */
105#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ 105#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
@@ -205,4 +205,4 @@ enum rtc_control {
205#define GMAC_MMC_TX_INTR 0x108 205#define GMAC_MMC_TX_INTR 0x108
206#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 206#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
207 207
208extern struct stmmac_dma_ops dwmac1000_dma_ops; 208extern const struct stmmac_dma_ops dwmac1000_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index f1f426146f40..6ae4c3f4c63c 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -50,6 +50,18 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
50#endif 50#endif
51} 51}
52 52
53static int dwmac1000_rx_coe_supported(void __iomem *ioaddr)
54{
55 u32 value = readl(ioaddr + GMAC_CONTROL);
56
57 value |= GMAC_CONTROL_IPC;
58 writel(value, ioaddr + GMAC_CONTROL);
59
60 value = readl(ioaddr + GMAC_CONTROL);
61
62 return !!(value & GMAC_CONTROL_IPC);
63}
64
53static void dwmac1000_dump_regs(void __iomem *ioaddr) 65static void dwmac1000_dump_regs(void __iomem *ioaddr)
54{ 66{
55 int i; 67 int i;
@@ -200,8 +212,9 @@ static void dwmac1000_irq_status(void __iomem *ioaddr)
200 } 212 }
201} 213}
202 214
203struct stmmac_ops dwmac1000_ops = { 215static const struct stmmac_ops dwmac1000_ops = {
204 .core_init = dwmac1000_core_init, 216 .core_init = dwmac1000_core_init,
217 .rx_coe = dwmac1000_rx_coe_supported,
205 .dump_regs = dwmac1000_dump_regs, 218 .dump_regs = dwmac1000_dump_regs,
206 .host_irq_status = dwmac1000_irq_status, 219 .host_irq_status = dwmac1000_irq_status,
207 .set_filter = dwmac1000_set_filter, 220 .set_filter = dwmac1000_set_filter,
@@ -226,7 +239,6 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
226 mac->mac = &dwmac1000_ops; 239 mac->mac = &dwmac1000_ops;
227 mac->dma = &dwmac1000_dma_ops; 240 mac->dma = &dwmac1000_dma_ops;
228 241
229 mac->pmt = PMT_SUPPORTED;
230 mac->link.port = GMAC_CONTROL_PS; 242 mac->link.port = GMAC_CONTROL_PS;
231 mac->link.duplex = GMAC_CONTROL_DM; 243 mac->link.duplex = GMAC_CONTROL_DM;
232 mac->link.speed = GMAC_CONTROL_FES; 244 mac->link.speed = GMAC_CONTROL_FES;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 2ef5a56370e9..2c47712d45d0 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -33,10 +33,18 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
33 u32 dma_rx) 33 u32 dma_rx)
34{ 34{
35 u32 value = readl(ioaddr + DMA_BUS_MODE); 35 u32 value = readl(ioaddr + DMA_BUS_MODE);
36 int limit;
37
36 /* DMA SW reset */ 38 /* DMA SW reset */
37 value |= DMA_BUS_MODE_SFT_RESET; 39 value |= DMA_BUS_MODE_SFT_RESET;
38 writel(value, ioaddr + DMA_BUS_MODE); 40 writel(value, ioaddr + DMA_BUS_MODE);
39 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); 41 limit = 15000;
42 while (limit--) {
43 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
44 break;
45 }
46 if (limit < 0)
47 return -EBUSY;
40 48
41 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | 49 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
42 ((pbl << DMA_BUS_MODE_PBL_SHIFT) | 50 ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
@@ -130,7 +138,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
130 } 138 }
131} 139}
132 140
133struct stmmac_dma_ops dwmac1000_dma_ops = { 141const struct stmmac_dma_ops dwmac1000_dma_ops = {
134 .init = dwmac1000_dma_init, 142 .init = dwmac1000_dma_init,
135 .dump_regs = dwmac1000_dump_dma_regs, 143 .dump_regs = dwmac1000_dump_dma_regs,
136 .dma_mode = dwmac1000_dma_operation_mode, 144 .dma_mode = dwmac1000_dma_operation_mode,
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
index db06c04ce480..c724fc36a24f 100644
--- a/drivers/net/stmmac/dwmac100_core.c
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -42,6 +42,11 @@ static void dwmac100_core_init(void __iomem *ioaddr)
42#endif 42#endif
43} 43}
44 44
45static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
46{
47 return 0;
48}
49
45static void dwmac100_dump_mac_regs(void __iomem *ioaddr) 50static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
46{ 51{
47 pr_info("\t----------------------------------------------\n" 52 pr_info("\t----------------------------------------------\n"
@@ -163,8 +168,9 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
163 return; 168 return;
164} 169}
165 170
166struct stmmac_ops dwmac100_ops = { 171static const struct stmmac_ops dwmac100_ops = {
167 .core_init = dwmac100_core_init, 172 .core_init = dwmac100_core_init,
173 .rx_coe = dwmac100_rx_coe_supported,
168 .dump_regs = dwmac100_dump_mac_regs, 174 .dump_regs = dwmac100_dump_mac_regs,
169 .host_irq_status = dwmac100_irq_status, 175 .host_irq_status = dwmac100_irq_status,
170 .set_filter = dwmac100_set_filter, 176 .set_filter = dwmac100_set_filter,
@@ -187,7 +193,6 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
187 mac->mac = &dwmac100_ops; 193 mac->mac = &dwmac100_ops;
188 mac->dma = &dwmac100_dma_ops; 194 mac->dma = &dwmac100_dma_ops;
189 195
190 mac->pmt = PMT_NOT_SUPPORTED;
191 mac->link.port = MAC_CONTROL_PS; 196 mac->link.port = MAC_CONTROL_PS;
192 mac->link.duplex = MAC_CONTROL_F; 197 mac->link.duplex = MAC_CONTROL_F;
193 mac->link.speed = 0; 198 mac->link.speed = 0;
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
index c7279d2b946b..e3e224b7d9e2 100644
--- a/drivers/net/stmmac/dwmac100_dma.c
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -35,10 +35,18 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
35 u32 dma_rx) 35 u32 dma_rx)
36{ 36{
37 u32 value = readl(ioaddr + DMA_BUS_MODE); 37 u32 value = readl(ioaddr + DMA_BUS_MODE);
38 int limit;
39
38 /* DMA SW reset */ 40 /* DMA SW reset */
39 value |= DMA_BUS_MODE_SFT_RESET; 41 value |= DMA_BUS_MODE_SFT_RESET;
40 writel(value, ioaddr + DMA_BUS_MODE); 42 writel(value, ioaddr + DMA_BUS_MODE);
41 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); 43 limit = 15000;
44 while (limit--) {
45 if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
46 break;
47 }
48 if (limit < 0)
49 return -EBUSY;
42 50
43 /* Enable Application Access by writing to DMA CSR0 */ 51 /* Enable Application Access by writing to DMA CSR0 */
44 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), 52 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
@@ -118,7 +126,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
118 } 126 }
119} 127}
120 128
121struct stmmac_dma_ops dwmac100_dma_ops = { 129const struct stmmac_dma_ops dwmac100_dma_ops = {
122 .init = dwmac100_dma_init, 130 .init = dwmac100_dma_init,
123 .dump_regs = dwmac100_dump_dma_regs, 131 .dump_regs = dwmac100_dump_dma_regs,
124 .dma_mode = dwmac100_dma_operation_mode, 132 .dma_mode = dwmac100_dma_operation_mode,
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
index 77ff88c3958b..e5dfb6a30182 100644
--- a/drivers/net/stmmac/enh_desc.c
+++ b/drivers/net/stmmac/enh_desc.c
@@ -284,7 +284,7 @@ static void enh_desc_release_tx_desc(struct dma_desc *p)
284{ 284{
285 int ter = p->des01.etx.end_ring; 285 int ter = p->des01.etx.end_ring;
286 286
287 memset(p, 0, sizeof(struct dma_desc)); 287 memset(p, 0, offsetof(struct dma_desc, des2));
288 p->des01.etx.end_ring = ter; 288 p->des01.etx.end_ring = ter;
289} 289}
290 290
@@ -318,7 +318,7 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p)
318 return p->des01.erx.frame_length; 318 return p->des01.erx.frame_length;
319} 319}
320 320
321struct stmmac_desc_ops enh_desc_ops = { 321const struct stmmac_desc_ops enh_desc_ops = {
322 .tx_status = enh_desc_get_tx_status, 322 .tx_status = enh_desc_get_tx_status,
323 .rx_status = enh_desc_get_rx_status, 323 .rx_status = enh_desc_get_rx_status,
324 .get_tx_len = enh_desc_get_tx_len, 324 .get_tx_len = enh_desc_get_tx_len,
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
index 51f4440ab98b..cd0cc76f7a1c 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/stmmac/norm_desc.c
@@ -174,22 +174,7 @@ static void ndesc_release_tx_desc(struct dma_desc *p)
174{ 174{
175 int ter = p->des01.tx.end_ring; 175 int ter = p->des01.tx.end_ring;
176 176
177 /* clean field used within the xmit */ 177 memset(p, 0, offsetof(struct dma_desc, des2));
178 p->des01.tx.first_segment = 0;
179 p->des01.tx.last_segment = 0;
180 p->des01.tx.buffer1_size = 0;
181
182 /* clean status reported */
183 p->des01.tx.error_summary = 0;
184 p->des01.tx.underflow_error = 0;
185 p->des01.tx.no_carrier = 0;
186 p->des01.tx.loss_carrier = 0;
187 p->des01.tx.excessive_deferral = 0;
188 p->des01.tx.excessive_collisions = 0;
189 p->des01.tx.late_collision = 0;
190 p->des01.tx.heartbeat_fail = 0;
191 p->des01.tx.deferred = 0;
192
193 /* set termination field */ 178 /* set termination field */
194 p->des01.tx.end_ring = ter; 179 p->des01.tx.end_ring = ter;
195} 180}
@@ -217,7 +202,7 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p)
217 return p->des01.rx.frame_length; 202 return p->des01.rx.frame_length;
218} 203}
219 204
220struct stmmac_desc_ops ndesc_ops = { 205const struct stmmac_desc_ops ndesc_ops = {
221 .tx_status = ndesc_get_tx_status, 206 .tx_status = ndesc_get_tx_status,
222 .rx_status = ndesc_get_rx_status, 207 .rx_status = ndesc_get_rx_status,
223 .get_tx_len = ndesc_get_tx_len, 208 .get_tx_len = ndesc_get_tx_len,
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index d0ddab0d21c2..79bdc2e13224 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -51,7 +51,6 @@ struct stmmac_priv {
51 int is_gmac; 51 int is_gmac;
52 dma_addr_t dma_rx_phy; 52 dma_addr_t dma_rx_phy;
53 unsigned int dma_rx_size; 53 unsigned int dma_rx_size;
54 int rx_csum;
55 unsigned int dma_buf_sz; 54 unsigned int dma_buf_sz;
56 struct device *device; 55 struct device *device;
57 struct mac_device_info *hw; 56 struct mac_device_info *hw;
@@ -78,6 +77,7 @@ struct stmmac_priv {
78 unsigned int flow_ctrl; 77 unsigned int flow_ctrl;
79 unsigned int pause; 78 unsigned int pause;
80 struct mii_bus *mii; 79 struct mii_bus *mii;
80 int mii_clk_csr;
81 81
82 u32 msg_enable; 82 u32 msg_enable;
83 spinlock_t lock; 83 spinlock_t lock;
@@ -91,6 +91,9 @@ struct stmmac_priv {
91 struct vlan_group *vlgrp; 91 struct vlan_group *vlgrp;
92#endif 92#endif
93 int enh_desc; 93 int enh_desc;
94 int rx_coe;
95 int bugged_jumbo;
96 int no_csum_insertion;
94}; 97};
95 98
96#ifdef CONFIG_STM_DRIVERS 99#ifdef CONFIG_STM_DRIVERS
@@ -118,5 +121,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
118extern int stmmac_mdio_unregister(struct net_device *ndev); 121extern int stmmac_mdio_unregister(struct net_device *ndev);
119extern int stmmac_mdio_register(struct net_device *ndev); 122extern int stmmac_mdio_register(struct net_device *ndev);
120extern void stmmac_set_ethtool_ops(struct net_device *netdev); 123extern void stmmac_set_ethtool_ops(struct net_device *netdev);
121extern struct stmmac_desc_ops enh_desc_ops; 124extern const struct stmmac_desc_ops enh_desc_ops;
122extern struct stmmac_desc_ops ndesc_ops; 125extern const struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 63b68e61afce..6d65482e789a 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -89,8 +89,8 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
89}; 89};
90#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 90#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
91 91
92void stmmac_ethtool_getdrvinfo(struct net_device *dev, 92static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
93 struct ethtool_drvinfo *info) 93 struct ethtool_drvinfo *info)
94{ 94{
95 struct stmmac_priv *priv = netdev_priv(dev); 95 struct stmmac_priv *priv = netdev_priv(dev);
96 96
@@ -104,7 +104,8 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev,
104 info->n_stats = STMMAC_STATS_LEN; 104 info->n_stats = STMMAC_STATS_LEN;
105} 105}
106 106
107int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) 107static int stmmac_ethtool_getsettings(struct net_device *dev,
108 struct ethtool_cmd *cmd)
108{ 109{
109 struct stmmac_priv *priv = netdev_priv(dev); 110 struct stmmac_priv *priv = netdev_priv(dev);
110 struct phy_device *phy = priv->phydev; 111 struct phy_device *phy = priv->phydev;
@@ -126,7 +127,8 @@ int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
126 return rc; 127 return rc;
127} 128}
128 129
129int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) 130static int stmmac_ethtool_setsettings(struct net_device *dev,
131 struct ethtool_cmd *cmd)
130{ 132{
131 struct stmmac_priv *priv = netdev_priv(dev); 133 struct stmmac_priv *priv = netdev_priv(dev);
132 struct phy_device *phy = priv->phydev; 134 struct phy_device *phy = priv->phydev;
@@ -139,32 +141,32 @@ int stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
139 return rc; 141 return rc;
140} 142}
141 143
142u32 stmmac_ethtool_getmsglevel(struct net_device *dev) 144static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
143{ 145{
144 struct stmmac_priv *priv = netdev_priv(dev); 146 struct stmmac_priv *priv = netdev_priv(dev);
145 return priv->msg_enable; 147 return priv->msg_enable;
146} 148}
147 149
148void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) 150static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
149{ 151{
150 struct stmmac_priv *priv = netdev_priv(dev); 152 struct stmmac_priv *priv = netdev_priv(dev);
151 priv->msg_enable = level; 153 priv->msg_enable = level;
152 154
153} 155}
154 156
155int stmmac_check_if_running(struct net_device *dev) 157static int stmmac_check_if_running(struct net_device *dev)
156{ 158{
157 if (!netif_running(dev)) 159 if (!netif_running(dev))
158 return -EBUSY; 160 return -EBUSY;
159 return 0; 161 return 0;
160} 162}
161 163
162int stmmac_ethtool_get_regs_len(struct net_device *dev) 164static int stmmac_ethtool_get_regs_len(struct net_device *dev)
163{ 165{
164 return REG_SPACE_SIZE; 166 return REG_SPACE_SIZE;
165} 167}
166 168
167void stmmac_ethtool_gregs(struct net_device *dev, 169static void stmmac_ethtool_gregs(struct net_device *dev,
168 struct ethtool_regs *regs, void *space) 170 struct ethtool_regs *regs, void *space)
169{ 171{
170 int i; 172 int i;
@@ -195,7 +197,7 @@ void stmmac_ethtool_gregs(struct net_device *dev,
195 } 197 }
196} 198}
197 199
198int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) 200static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
199{ 201{
200 if (data) 202 if (data)
201 netdev->features |= NETIF_F_HW_CSUM; 203 netdev->features |= NETIF_F_HW_CSUM;
@@ -205,11 +207,11 @@ int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
205 return 0; 207 return 0;
206} 208}
207 209
208u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) 210static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
209{ 211{
210 struct stmmac_priv *priv = netdev_priv(dev); 212 struct stmmac_priv *priv = netdev_priv(dev);
211 213
212 return priv->rx_csum; 214 return priv->rx_coe;
213} 215}
214 216
215static void 217static void
@@ -322,7 +324,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
322 struct stmmac_priv *priv = netdev_priv(dev); 324 struct stmmac_priv *priv = netdev_priv(dev);
323 325
324 spin_lock_irq(&priv->lock); 326 spin_lock_irq(&priv->lock);
325 if (priv->wolenabled == PMT_SUPPORTED) { 327 if (device_can_wakeup(priv->device)) {
326 wol->supported = WAKE_MAGIC; 328 wol->supported = WAKE_MAGIC;
327 wol->wolopts = priv->wolopts; 329 wol->wolopts = priv->wolopts;
328 } 330 }
@@ -334,16 +336,20 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
334 struct stmmac_priv *priv = netdev_priv(dev); 336 struct stmmac_priv *priv = netdev_priv(dev);
335 u32 support = WAKE_MAGIC; 337 u32 support = WAKE_MAGIC;
336 338
337 if (priv->wolenabled == PMT_NOT_SUPPORTED) 339 if (!device_can_wakeup(priv->device))
338 return -EINVAL; 340 return -EINVAL;
339 341
340 if (wol->wolopts & ~support) 342 if (wol->wolopts & ~support)
341 return -EINVAL; 343 return -EINVAL;
342 344
343 if (wol->wolopts == 0) 345 if (wol->wolopts) {
344 device_set_wakeup_enable(priv->device, 0); 346 pr_info("stmmac: wakeup enable\n");
345 else
346 device_set_wakeup_enable(priv->device, 1); 347 device_set_wakeup_enable(priv->device, 1);
348 enable_irq_wake(dev->irq);
349 } else {
350 device_set_wakeup_enable(priv->device, 0);
351 disable_irq_wake(dev->irq);
352 }
347 353
348 spin_lock_irq(&priv->lock); 354 spin_lock_irq(&priv->lock);
349 priv->wolopts = wol->wolopts; 355 priv->wolopts = wol->wolopts;
@@ -374,10 +380,8 @@ static struct ethtool_ops stmmac_ethtool_ops = {
374 .get_wol = stmmac_get_wol, 380 .get_wol = stmmac_get_wol,
375 .set_wol = stmmac_set_wol, 381 .set_wol = stmmac_set_wol,
376 .get_sset_count = stmmac_get_sset_count, 382 .get_sset_count = stmmac_get_sset_count,
377#ifdef NETIF_F_TSO
378 .get_tso = ethtool_op_get_tso, 383 .get_tso = ethtool_op_get_tso,
379 .set_tso = ethtool_op_set_tso, 384 .set_tso = ethtool_op_set_tso,
380#endif
381}; 385};
382 386
383void stmmac_set_ethtool_ops(struct net_device *netdev) 387void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 03c160c6d75c..823b9e6431d5 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -134,13 +134,6 @@ static int buf_sz = DMA_BUFFER_SIZE;
134module_param(buf_sz, int, S_IRUGO | S_IWUSR); 134module_param(buf_sz, int, S_IRUGO | S_IWUSR);
135MODULE_PARM_DESC(buf_sz, "DMA buffer size"); 135MODULE_PARM_DESC(buf_sz, "DMA buffer size");
136 136
137/* In case of Giga ETH, we can enable/disable the COE for the
138 * transmit HW checksum computation.
139 * Note that, if tx csum is off in HW, SG will be still supported. */
140static int tx_coe = HW_CSUM;
141module_param(tx_coe, int, S_IRUGO | S_IWUSR);
142MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]");
143
144static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | 137static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
145 NETIF_MSG_LINK | NETIF_MSG_IFUP | 138 NETIF_MSG_LINK | NETIF_MSG_IFUP |
146 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); 139 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
@@ -569,29 +562,22 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
569 * stmmac_dma_operation_mode - HW DMA operation mode 562 * stmmac_dma_operation_mode - HW DMA operation mode
570 * @priv : pointer to the private device structure. 563 * @priv : pointer to the private device structure.
571 * Description: it sets the DMA operation mode: tx/rx DMA thresholds 564 * Description: it sets the DMA operation mode: tx/rx DMA thresholds
572 * or Store-And-Forward capability. It also verifies the COE for the 565 * or Store-And-Forward capability.
573 * transmission in case of Giga ETH.
574 */ 566 */
575static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 567static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
576{ 568{
577 if (!priv->is_gmac) { 569 if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
578 /* MAC 10/100 */ 570 /* In case of GMAC, SF mode has to be enabled
579 priv->hw->dma->dma_mode(priv->ioaddr, tc, 0); 571 * to perform the TX COE. This depends on:
580 priv->tx_coe = NO_HW_CSUM; 572 * 1) TX COE if actually supported
581 } else { 573 * 2) There is no bugged Jumbo frame support
582 if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { 574 * that needs to not insert csum in the TDES.
583 priv->hw->dma->dma_mode(priv->ioaddr, 575 */
584 SF_DMA_MODE, SF_DMA_MODE); 576 priv->hw->dma->dma_mode(priv->ioaddr,
585 tc = SF_DMA_MODE; 577 SF_DMA_MODE, SF_DMA_MODE);
586 priv->tx_coe = HW_CSUM; 578 tc = SF_DMA_MODE;
587 } else { 579 } else
588 /* Checksum computation is performed in software. */ 580 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
589 priv->hw->dma->dma_mode(priv->ioaddr, tc,
590 SF_DMA_MODE);
591 priv->tx_coe = NO_HW_CSUM;
592 }
593 }
594 tx_coe = priv->tx_coe;
595} 581}
596 582
597/** 583/**
@@ -858,6 +844,12 @@ static int stmmac_open(struct net_device *dev)
858 /* Initialize the MAC Core */ 844 /* Initialize the MAC Core */
859 priv->hw->mac->core_init(priv->ioaddr); 845 priv->hw->mac->core_init(priv->ioaddr);
860 846
847 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
848 if (priv->rx_coe)
849 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
850 if (priv->tx_coe)
851 pr_info("\tTX Checksum insertion supported\n");
852
861 priv->shutdown = 0; 853 priv->shutdown = 0;
862 854
863 /* Initialise the MMC (if present) to disable all interrupts. */ 855 /* Initialise the MMC (if present) to disable all interrupts. */
@@ -1066,7 +1058,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1066 return stmmac_sw_tso(priv, skb); 1058 return stmmac_sw_tso(priv, skb);
1067 1059
1068 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { 1060 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1069 if (likely(priv->tx_coe == NO_HW_CSUM)) 1061 if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
1070 skb_checksum_help(skb); 1062 skb_checksum_help(skb);
1071 else 1063 else
1072 csum_insertion = 1; 1064 csum_insertion = 1;
@@ -1390,6 +1382,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1390 return -EINVAL; 1382 return -EINVAL;
1391 } 1383 }
1392 1384
1385 /* Some GMAC devices have a bugged Jumbo frame support that
1386 * needs to have the Tx COE disabled for oversized frames
1387 * (due to limited buffer sizes). In this case we disable
1388 * the TX csum insertionin the TDES and not use SF. */
1389 if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
1390 priv->no_csum_insertion = 1;
1391 else
1392 priv->no_csum_insertion = 0;
1393
1393 dev->mtu = new_mtu; 1394 dev->mtu = new_mtu;
1394 1395
1395 return 0; 1396 return 0;
@@ -1510,9 +1511,6 @@ static int stmmac_probe(struct net_device *dev)
1510#endif 1511#endif
1511 priv->msg_enable = netif_msg_init(debug, default_msg_level); 1512 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1512 1513
1513 if (priv->is_gmac)
1514 priv->rx_csum = 1;
1515
1516 if (flow_ctrl) 1514 if (flow_ctrl)
1517 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 1515 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
1518 1516
@@ -1570,9 +1568,8 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1570 1568
1571 priv->hw = device; 1569 priv->hw = device;
1572 1570
1573 priv->wolenabled = priv->hw->pmt; /* PMT supported */ 1571 if (device_can_wakeup(priv->device))
1574 if (priv->wolenabled == PMT_SUPPORTED) 1572 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1575 priv->wolopts = WAKE_MAGIC; /* Magic Frame */
1576 1573
1577 return 0; 1574 return 0;
1578} 1575}
@@ -1662,7 +1659,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1662 ret = -ENODEV; 1659 ret = -ENODEV;
1663 goto out; 1660 goto out;
1664 } 1661 }
1665 pr_info("done!\n"); 1662 pr_info("\tdone!\n");
1666 1663
1667 if (!request_mem_region(res->start, resource_size(res), 1664 if (!request_mem_region(res->start, resource_size(res),
1668 pdev->name)) { 1665 pdev->name)) {
@@ -1704,10 +1701,19 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1704 plat_dat = pdev->dev.platform_data; 1701 plat_dat = pdev->dev.platform_data;
1705 priv->bus_id = plat_dat->bus_id; 1702 priv->bus_id = plat_dat->bus_id;
1706 priv->pbl = plat_dat->pbl; /* TLI */ 1703 priv->pbl = plat_dat->pbl; /* TLI */
1704 priv->mii_clk_csr = plat_dat->clk_csr;
1705 priv->tx_coe = plat_dat->tx_coe;
1706 priv->bugged_jumbo = plat_dat->bugged_jumbo;
1707 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ 1707 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1708 priv->enh_desc = plat_dat->enh_desc; 1708 priv->enh_desc = plat_dat->enh_desc;
1709 priv->ioaddr = addr; 1709 priv->ioaddr = addr;
1710 1710
1711 /* PMT module is not integrated in all the MAC devices. */
1712 if (plat_dat->pmt) {
1713 pr_info("\tPMT module supported\n");
1714 device_set_wakeup_capable(&pdev->dev, 1);
1715 }
1716
1711 platform_set_drvdata(pdev, ndev); 1717 platform_set_drvdata(pdev, ndev);
1712 1718
1713 /* Set the I/O base addr */ 1719 /* Set the I/O base addr */
@@ -1835,13 +1841,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
1835 1841
1836 stmmac_mac_disable_tx(priv->ioaddr); 1842 stmmac_mac_disable_tx(priv->ioaddr);
1837 1843
1838 if (device_may_wakeup(&(pdev->dev))) { 1844 /* Enable Power down mode by programming the PMT regs */
1839 /* Enable Power down mode by programming the PMT regs */ 1845 if (device_can_wakeup(priv->device))
1840 if (priv->wolenabled == PMT_SUPPORTED) 1846 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1841 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 1847 else
1842 } else {
1843 stmmac_mac_disable_rx(priv->ioaddr); 1848 stmmac_mac_disable_rx(priv->ioaddr);
1844 }
1845 } else { 1849 } else {
1846 priv->shutdown = 1; 1850 priv->shutdown = 1;
1847 /* Although this can appear slightly redundant it actually 1851 /* Although this can appear slightly redundant it actually
@@ -1876,9 +1880,8 @@ static int stmmac_resume(struct platform_device *pdev)
1876 * is received. Anyway, it's better to manually clear 1880 * is received. Anyway, it's better to manually clear
1877 * this bit because it can generate problems while resuming 1881 * this bit because it can generate problems while resuming
1878 * from another devices (e.g. serial console). */ 1882 * from another devices (e.g. serial console). */
1879 if (device_may_wakeup(&(pdev->dev))) 1883 if (device_can_wakeup(priv->device))
1880 if (priv->wolenabled == PMT_SUPPORTED) 1884 priv->hw->mac->pmt(priv->ioaddr, 0);
1881 priv->hw->mac->pmt(priv->ioaddr, 0);
1882 1885
1883 netif_device_attach(dev); 1886 netif_device_attach(dev);
1884 1887
@@ -1965,8 +1968,6 @@ static int __init stmmac_cmdline_opt(char *str)
1965 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz); 1968 strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz);
1966 else if (!strncmp(opt, "tc:", 3)) 1969 else if (!strncmp(opt, "tc:", 3))
1967 strict_strtoul(opt + 3, 0, (unsigned long *)&tc); 1970 strict_strtoul(opt + 3, 0, (unsigned long *)&tc);
1968 else if (!strncmp(opt, "tx_coe:", 7))
1969 strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe);
1970 else if (!strncmp(opt, "watchdog:", 9)) 1971 else if (!strncmp(opt, "watchdog:", 9))
1971 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog); 1972 strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog);
1972 else if (!strncmp(opt, "flow_ctrl:", 10)) 1973 else if (!strncmp(opt, "flow_ctrl:", 10))
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 03dea1401571..d7441616357d 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
53 int data; 53 int data;
54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
55 ((phyreg << 6) & (0x000007C0))); 55 ((phyreg << 6) & (0x000007C0)));
56 regValue |= MII_BUSY; /* in case of GMAC */ 56 regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
57 57
58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
59 writel(regValue, priv->ioaddr + mii_address); 59 writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
86 | MII_WRITE; 86 | MII_WRITE;
87 87
88 value |= MII_BUSY; 88 value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
89
89 90
90 /* Wait until any existing MII operation is complete */ 91 /* Wait until any existing MII operation is complete */
91 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 92 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 358c22f9acbe..7d9ec23aabf6 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -436,7 +436,7 @@ static int lance_open( struct net_device *dev )
436 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", 436 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
437 dev->name, i, DREG )); 437 dev->name, i, DREG ));
438 DREG = CSR0_STOP; 438 DREG = CSR0_STOP;
439 return( -EIO ); 439 return -EIO;
440 } 440 }
441 441
442 DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA; 442 DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
@@ -445,7 +445,7 @@ static int lance_open( struct net_device *dev )
445 445
446 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); 446 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
447 447
448 return( 0 ); 448 return 0;
449} 449}
450 450
451 451
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 7dfdbee878e8..3ed2a67bd6d3 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -96,16 +96,10 @@ static char *media[MAX_UNITS];
96#include <asm/io.h> 96#include <asm/io.h>
97#include <linux/delay.h> 97#include <linux/delay.h>
98#include <linux/spinlock.h> 98#include <linux/spinlock.h>
99#ifndef _COMPAT_WITH_OLD_KERNEL 99#include <linux/dma-mapping.h>
100#include <linux/crc32.h> 100#include <linux/crc32.h>
101#include <linux/ethtool.h> 101#include <linux/ethtool.h>
102#include <linux/mii.h> 102#include <linux/mii.h>
103#else
104#include "crc32.h"
105#include "ethtool.h"
106#include "mii.h"
107#include "compat.h"
108#endif
109 103
110/* These identify the driver base version and may not be removed. */ 104/* These identify the driver base version and may not be removed. */
111static const char version[] __devinitconst = 105static const char version[] __devinitconst =
@@ -369,9 +363,21 @@ struct netdev_private {
369 dma_addr_t tx_ring_dma; 363 dma_addr_t tx_ring_dma;
370 dma_addr_t rx_ring_dma; 364 dma_addr_t rx_ring_dma;
371 struct timer_list timer; /* Media monitoring timer. */ 365 struct timer_list timer; /* Media monitoring timer. */
366 /* ethtool extra stats */
367 struct {
368 u64 tx_multiple_collisions;
369 u64 tx_single_collisions;
370 u64 tx_late_collisions;
371 u64 tx_deferred;
372 u64 tx_deferred_excessive;
373 u64 tx_aborted;
374 u64 tx_bcasts;
375 u64 rx_bcasts;
376 u64 tx_mcasts;
377 u64 rx_mcasts;
378 } xstats;
372 /* Frequently used values: keep some adjacent for cache effect. */ 379 /* Frequently used values: keep some adjacent for cache effect. */
373 spinlock_t lock; 380 spinlock_t lock;
374 spinlock_t rx_lock; /* Group with Tx control cache line. */
375 int msg_enable; 381 int msg_enable;
376 int chip_id; 382 int chip_id;
377 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ 383 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
@@ -396,6 +402,7 @@ struct netdev_private {
396 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ 402 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
397 struct pci_dev *pci_dev; 403 struct pci_dev *pci_dev;
398 void __iomem *base; 404 void __iomem *base;
405 spinlock_t statlock;
399}; 406};
400 407
401/* The station address location in the EEPROM. */ 408/* The station address location in the EEPROM. */
@@ -520,16 +527,19 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
520 np->chip_id = chip_idx; 527 np->chip_id = chip_idx;
521 np->msg_enable = (1 << debug) - 1; 528 np->msg_enable = (1 << debug) - 1;
522 spin_lock_init(&np->lock); 529 spin_lock_init(&np->lock);
530 spin_lock_init(&np->statlock);
523 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev); 531 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
524 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev); 532 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
525 533
526 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); 534 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
535 &ring_dma, GFP_KERNEL);
527 if (!ring_space) 536 if (!ring_space)
528 goto err_out_cleardev; 537 goto err_out_cleardev;
529 np->tx_ring = (struct netdev_desc *)ring_space; 538 np->tx_ring = (struct netdev_desc *)ring_space;
530 np->tx_ring_dma = ring_dma; 539 np->tx_ring_dma = ring_dma;
531 540
532 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); 541 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
542 &ring_dma, GFP_KERNEL);
533 if (!ring_space) 543 if (!ring_space)
534 goto err_out_unmap_tx; 544 goto err_out_unmap_tx;
535 np->rx_ring = (struct netdev_desc *)ring_space; 545 np->rx_ring = (struct netdev_desc *)ring_space;
@@ -663,9 +673,11 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
663err_out_unregister: 673err_out_unregister:
664 unregister_netdev(dev); 674 unregister_netdev(dev);
665err_out_unmap_rx: 675err_out_unmap_rx:
666 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 676 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
677 np->rx_ring, np->rx_ring_dma);
667err_out_unmap_tx: 678err_out_unmap_tx:
668 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 679 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
680 np->tx_ring, np->tx_ring_dma);
669err_out_cleardev: 681err_out_cleardev:
670 pci_set_drvdata(pdev, NULL); 682 pci_set_drvdata(pdev, NULL);
671 pci_iounmap(pdev, ioaddr); 683 pci_iounmap(pdev, ioaddr);
@@ -1011,8 +1023,14 @@ static void init_ring(struct net_device *dev)
1011 skb->dev = dev; /* Mark as being used by this device. */ 1023 skb->dev = dev; /* Mark as being used by this device. */
1012 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 1024 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1013 np->rx_ring[i].frag[0].addr = cpu_to_le32( 1025 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1014 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, 1026 dma_map_single(&np->pci_dev->dev, skb->data,
1015 PCI_DMA_FROMDEVICE)); 1027 np->rx_buf_sz, DMA_FROM_DEVICE));
1028 if (dma_mapping_error(&np->pci_dev->dev,
1029 np->rx_ring[i].frag[0].addr)) {
1030 dev_kfree_skb(skb);
1031 np->rx_skbuff[i] = NULL;
1032 break;
1033 }
1016 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); 1034 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1017 } 1035 }
1018 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 1036 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1063,9 +1081,11 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1063 1081
1064 txdesc->next_desc = 0; 1082 txdesc->next_desc = 0;
1065 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); 1083 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1066 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data, 1084 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1067 skb->len, 1085 skb->data, skb->len, DMA_TO_DEVICE));
1068 PCI_DMA_TODEVICE)); 1086 if (dma_mapping_error(&np->pci_dev->dev,
1087 txdesc->frag[0].addr))
1088 goto drop_frame;
1069 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag); 1089 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1070 1090
1071 /* Increment cur_tx before tasklet_schedule() */ 1091 /* Increment cur_tx before tasklet_schedule() */
@@ -1087,6 +1107,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1087 dev->name, np->cur_tx, entry); 1107 dev->name, np->cur_tx, entry);
1088 } 1108 }
1089 return NETDEV_TX_OK; 1109 return NETDEV_TX_OK;
1110
1111drop_frame:
1112 dev_kfree_skb(skb);
1113 np->tx_skbuff[entry] = NULL;
1114 dev->stats.tx_dropped++;
1115 return NETDEV_TX_OK;
1090} 1116}
1091 1117
1092/* Reset hardware tx and free all of tx buffers */ 1118/* Reset hardware tx and free all of tx buffers */
@@ -1097,7 +1123,6 @@ reset_tx (struct net_device *dev)
1097 void __iomem *ioaddr = np->base; 1123 void __iomem *ioaddr = np->base;
1098 struct sk_buff *skb; 1124 struct sk_buff *skb;
1099 int i; 1125 int i;
1100 int irq = in_interrupt();
1101 1126
1102 /* Reset tx logic, TxListPtr will be cleaned */ 1127 /* Reset tx logic, TxListPtr will be cleaned */
1103 iowrite16 (TxDisable, ioaddr + MACCtrl1); 1128 iowrite16 (TxDisable, ioaddr + MACCtrl1);
@@ -1109,13 +1134,10 @@ reset_tx (struct net_device *dev)
1109 1134
1110 skb = np->tx_skbuff[i]; 1135 skb = np->tx_skbuff[i];
1111 if (skb) { 1136 if (skb) {
1112 pci_unmap_single(np->pci_dev, 1137 dma_unmap_single(&np->pci_dev->dev,
1113 le32_to_cpu(np->tx_ring[i].frag[0].addr), 1138 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1114 skb->len, PCI_DMA_TODEVICE); 1139 skb->len, DMA_TO_DEVICE);
1115 if (irq) 1140 dev_kfree_skb_any(skb);
1116 dev_kfree_skb_irq (skb);
1117 else
1118 dev_kfree_skb (skb);
1119 np->tx_skbuff[i] = NULL; 1141 np->tx_skbuff[i] = NULL;
1120 dev->stats.tx_dropped++; 1142 dev->stats.tx_dropped++;
1121 } 1143 }
@@ -1233,9 +1255,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1233 break; 1255 break;
1234 skb = np->tx_skbuff[entry]; 1256 skb = np->tx_skbuff[entry];
1235 /* Free the original skb. */ 1257 /* Free the original skb. */
1236 pci_unmap_single(np->pci_dev, 1258 dma_unmap_single(&np->pci_dev->dev,
1237 le32_to_cpu(np->tx_ring[entry].frag[0].addr), 1259 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1238 skb->len, PCI_DMA_TODEVICE); 1260 skb->len, DMA_TO_DEVICE);
1239 dev_kfree_skb_irq (np->tx_skbuff[entry]); 1261 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1240 np->tx_skbuff[entry] = NULL; 1262 np->tx_skbuff[entry] = NULL;
1241 np->tx_ring[entry].frag[0].addr = 0; 1263 np->tx_ring[entry].frag[0].addr = 0;
@@ -1252,9 +1274,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1252 break; 1274 break;
1253 skb = np->tx_skbuff[entry]; 1275 skb = np->tx_skbuff[entry];
1254 /* Free the original skb. */ 1276 /* Free the original skb. */
1255 pci_unmap_single(np->pci_dev, 1277 dma_unmap_single(&np->pci_dev->dev,
1256 le32_to_cpu(np->tx_ring[entry].frag[0].addr), 1278 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1257 skb->len, PCI_DMA_TODEVICE); 1279 skb->len, DMA_TO_DEVICE);
1258 dev_kfree_skb_irq (np->tx_skbuff[entry]); 1280 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1259 np->tx_skbuff[entry] = NULL; 1281 np->tx_skbuff[entry] = NULL;
1260 np->tx_ring[entry].frag[0].addr = 0; 1282 np->tx_ring[entry].frag[0].addr = 0;
@@ -1334,22 +1356,18 @@ static void rx_poll(unsigned long data)
1334 if (pkt_len < rx_copybreak && 1356 if (pkt_len < rx_copybreak &&
1335 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1357 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1336 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1358 skb_reserve(skb, 2); /* 16 byte align the IP header */
1337 pci_dma_sync_single_for_cpu(np->pci_dev, 1359 dma_sync_single_for_cpu(&np->pci_dev->dev,
1338 le32_to_cpu(desc->frag[0].addr), 1360 le32_to_cpu(desc->frag[0].addr),
1339 np->rx_buf_sz, 1361 np->rx_buf_sz, DMA_FROM_DEVICE);
1340 PCI_DMA_FROMDEVICE);
1341
1342 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); 1362 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1343 pci_dma_sync_single_for_device(np->pci_dev, 1363 dma_sync_single_for_device(&np->pci_dev->dev,
1344 le32_to_cpu(desc->frag[0].addr), 1364 le32_to_cpu(desc->frag[0].addr),
1345 np->rx_buf_sz, 1365 np->rx_buf_sz, DMA_FROM_DEVICE);
1346 PCI_DMA_FROMDEVICE);
1347 skb_put(skb, pkt_len); 1366 skb_put(skb, pkt_len);
1348 } else { 1367 } else {
1349 pci_unmap_single(np->pci_dev, 1368 dma_unmap_single(&np->pci_dev->dev,
1350 le32_to_cpu(desc->frag[0].addr), 1369 le32_to_cpu(desc->frag[0].addr),
1351 np->rx_buf_sz, 1370 np->rx_buf_sz, DMA_FROM_DEVICE);
1352 PCI_DMA_FROMDEVICE);
1353 skb_put(skb = np->rx_skbuff[entry], pkt_len); 1371 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1354 np->rx_skbuff[entry] = NULL; 1372 np->rx_skbuff[entry] = NULL;
1355 } 1373 }
@@ -1396,8 +1414,14 @@ static void refill_rx (struct net_device *dev)
1396 skb->dev = dev; /* Mark as being used by this device. */ 1414 skb->dev = dev; /* Mark as being used by this device. */
1397 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1415 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1398 np->rx_ring[entry].frag[0].addr = cpu_to_le32( 1416 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1399 pci_map_single(np->pci_dev, skb->data, 1417 dma_map_single(&np->pci_dev->dev, skb->data,
1400 np->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1418 np->rx_buf_sz, DMA_FROM_DEVICE));
1419 if (dma_mapping_error(&np->pci_dev->dev,
1420 np->rx_ring[entry].frag[0].addr)) {
1421 dev_kfree_skb_irq(skb);
1422 np->rx_skbuff[entry] = NULL;
1423 break;
1424 }
1401 } 1425 }
1402 /* Perhaps we need not reset this field. */ 1426 /* Perhaps we need not reset this field. */
1403 np->rx_ring[entry].frag[0].length = 1427 np->rx_ring[entry].frag[0].length =
@@ -1475,27 +1499,41 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1475{ 1499{
1476 struct netdev_private *np = netdev_priv(dev); 1500 struct netdev_private *np = netdev_priv(dev);
1477 void __iomem *ioaddr = np->base; 1501 void __iomem *ioaddr = np->base;
1478 int i; 1502 unsigned long flags;
1503 u8 late_coll, single_coll, mult_coll;
1479 1504
1480 /* We should lock this segment of code for SMP eventually, although 1505 spin_lock_irqsave(&np->statlock, flags);
1481 the vulnerability window is very small and statistics are
1482 non-critical. */
1483 /* The chip only need report frame silently dropped. */ 1506 /* The chip only need report frame silently dropped. */
1484 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); 1507 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1485 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); 1508 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1486 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); 1509 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1487 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1488 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1489 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1490 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); 1510 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1491 ioread8(ioaddr + StatsTxDefer); 1511
1492 for (i = StatsTxDefer; i <= StatsMcastRx; i++) 1512 mult_coll = ioread8(ioaddr + StatsMultiColl);
1493 ioread8(ioaddr + i); 1513 np->xstats.tx_multiple_collisions += mult_coll;
1514 single_coll = ioread8(ioaddr + StatsOneColl);
1515 np->xstats.tx_single_collisions += single_coll;
1516 late_coll = ioread8(ioaddr + StatsLateColl);
1517 np->xstats.tx_late_collisions += late_coll;
1518 dev->stats.collisions += mult_coll
1519 + single_coll
1520 + late_coll;
1521
1522 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1523 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1524 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1525 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1526 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1527 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1528 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1529
1494 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); 1530 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1495 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; 1531 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1496 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); 1532 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1497 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; 1533 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1498 1534
1535 spin_unlock_irqrestore(&np->statlock, flags);
1536
1499 return &dev->stats; 1537 return &dev->stats;
1500} 1538}
1501 1539
@@ -1554,6 +1592,21 @@ static int __set_mac_addr(struct net_device *dev)
1554 return 0; 1592 return 0;
1555} 1593}
1556 1594
1595static const struct {
1596 const char name[ETH_GSTRING_LEN];
1597} sundance_stats[] = {
1598 { "tx_multiple_collisions" },
1599 { "tx_single_collisions" },
1600 { "tx_late_collisions" },
1601 { "tx_deferred" },
1602 { "tx_deferred_excessive" },
1603 { "tx_aborted" },
1604 { "tx_bcasts" },
1605 { "rx_bcasts" },
1606 { "tx_mcasts" },
1607 { "rx_mcasts" },
1608};
1609
1557static int check_if_running(struct net_device *dev) 1610static int check_if_running(struct net_device *dev)
1558{ 1611{
1559 if (!netif_running(dev)) 1612 if (!netif_running(dev))
@@ -1612,6 +1665,42 @@ static void set_msglevel(struct net_device *dev, u32 val)
1612 np->msg_enable = val; 1665 np->msg_enable = val;
1613} 1666}
1614 1667
1668static void get_strings(struct net_device *dev, u32 stringset,
1669 u8 *data)
1670{
1671 if (stringset == ETH_SS_STATS)
1672 memcpy(data, sundance_stats, sizeof(sundance_stats));
1673}
1674
1675static int get_sset_count(struct net_device *dev, int sset)
1676{
1677 switch (sset) {
1678 case ETH_SS_STATS:
1679 return ARRAY_SIZE(sundance_stats);
1680 default:
1681 return -EOPNOTSUPP;
1682 }
1683}
1684
1685static void get_ethtool_stats(struct net_device *dev,
1686 struct ethtool_stats *stats, u64 *data)
1687{
1688 struct netdev_private *np = netdev_priv(dev);
1689 int i = 0;
1690
1691 get_stats(dev);
1692 data[i++] = np->xstats.tx_multiple_collisions;
1693 data[i++] = np->xstats.tx_single_collisions;
1694 data[i++] = np->xstats.tx_late_collisions;
1695 data[i++] = np->xstats.tx_deferred;
1696 data[i++] = np->xstats.tx_deferred_excessive;
1697 data[i++] = np->xstats.tx_aborted;
1698 data[i++] = np->xstats.tx_bcasts;
1699 data[i++] = np->xstats.rx_bcasts;
1700 data[i++] = np->xstats.tx_mcasts;
1701 data[i++] = np->xstats.rx_mcasts;
1702}
1703
1615static const struct ethtool_ops ethtool_ops = { 1704static const struct ethtool_ops ethtool_ops = {
1616 .begin = check_if_running, 1705 .begin = check_if_running,
1617 .get_drvinfo = get_drvinfo, 1706 .get_drvinfo = get_drvinfo,
@@ -1621,6 +1710,9 @@ static const struct ethtool_ops ethtool_ops = {
1621 .get_link = get_link, 1710 .get_link = get_link,
1622 .get_msglevel = get_msglevel, 1711 .get_msglevel = get_msglevel,
1623 .set_msglevel = set_msglevel, 1712 .set_msglevel = set_msglevel,
1713 .get_strings = get_strings,
1714 .get_sset_count = get_sset_count,
1715 .get_ethtool_stats = get_ethtool_stats,
1624}; 1716};
1625 1717
1626static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1718static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1715,9 +1807,9 @@ static int netdev_close(struct net_device *dev)
1715 np->rx_ring[i].status = 0; 1807 np->rx_ring[i].status = 0;
1716 skb = np->rx_skbuff[i]; 1808 skb = np->rx_skbuff[i];
1717 if (skb) { 1809 if (skb) {
1718 pci_unmap_single(np->pci_dev, 1810 dma_unmap_single(&np->pci_dev->dev,
1719 le32_to_cpu(np->rx_ring[i].frag[0].addr), 1811 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1720 np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1812 np->rx_buf_sz, DMA_FROM_DEVICE);
1721 dev_kfree_skb(skb); 1813 dev_kfree_skb(skb);
1722 np->rx_skbuff[i] = NULL; 1814 np->rx_skbuff[i] = NULL;
1723 } 1815 }
@@ -1727,9 +1819,9 @@ static int netdev_close(struct net_device *dev)
1727 np->tx_ring[i].next_desc = 0; 1819 np->tx_ring[i].next_desc = 0;
1728 skb = np->tx_skbuff[i]; 1820 skb = np->tx_skbuff[i];
1729 if (skb) { 1821 if (skb) {
1730 pci_unmap_single(np->pci_dev, 1822 dma_unmap_single(&np->pci_dev->dev,
1731 le32_to_cpu(np->tx_ring[i].frag[0].addr), 1823 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1732 skb->len, PCI_DMA_TODEVICE); 1824 skb->len, DMA_TO_DEVICE);
1733 dev_kfree_skb(skb); 1825 dev_kfree_skb(skb);
1734 np->tx_skbuff[i] = NULL; 1826 np->tx_skbuff[i] = NULL;
1735 } 1827 }
@@ -1743,17 +1835,16 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
1743 struct net_device *dev = pci_get_drvdata(pdev); 1835 struct net_device *dev = pci_get_drvdata(pdev);
1744 1836
1745 if (dev) { 1837 if (dev) {
1746 struct netdev_private *np = netdev_priv(dev); 1838 struct netdev_private *np = netdev_priv(dev);
1747 1839 unregister_netdev(dev);
1748 unregister_netdev(dev); 1840 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1749 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, 1841 np->rx_ring, np->rx_ring_dma);
1750 np->rx_ring_dma); 1842 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1751 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, 1843 np->tx_ring, np->tx_ring_dma);
1752 np->tx_ring_dma); 1844 pci_iounmap(pdev, np->base);
1753 pci_iounmap(pdev, np->base); 1845 pci_release_regions(pdev);
1754 pci_release_regions(pdev); 1846 free_netdev(dev);
1755 free_netdev(dev); 1847 pci_set_drvdata(pdev, NULL);
1756 pci_set_drvdata(pdev, NULL);
1757 } 1848 }
1758} 1849}
1759 1850
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 4a4fac630337..d16880d7099b 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -88,7 +88,7 @@ static int reset_one_mii_phy(struct mii_phy* phy, int phy_id)
88 if ((val & BMCR_ISOLATE) && limit > 0) 88 if ((val & BMCR_ISOLATE) && limit > 0)
89 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); 89 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
90 90
91 return (limit <= 0); 91 return limit <= 0;
92} 92}
93 93
94static int bcm5201_init(struct mii_phy* phy) 94static int bcm5201_init(struct mii_phy* phy)
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 45f315ed1868..5e28c414421a 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2497,7 +2497,7 @@ static u32 hme_get_link(struct net_device *dev)
2497 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); 2497 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
2498 spin_unlock_irq(&hp->happy_lock); 2498 spin_unlock_irq(&hp->happy_lock);
2499 2499
2500 return (hp->sw_bmsr & BMSR_LSTATUS); 2500 return hp->sw_bmsr & BMSR_LSTATUS;
2501} 2501}
2502 2502
2503static const struct ethtool_ops hme_ethtool_ops = { 2503static const struct ethtool_ops hme_ethtool_ops = {
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 72e65d4666ef..9536b2f010be 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -711,7 +711,7 @@ static u32 qe_get_link(struct net_device *dev)
711 phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG); 711 phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
712 spin_unlock_irq(&qep->lock); 712 spin_unlock_irq(&qep->lock);
713 713
714 return (phyconfig & MREGS_PHYCONFIG_LSTAT); 714 return phyconfig & MREGS_PHYCONFIG_LSTAT;
715} 715}
716 716
717static const struct ethtool_ops qe_ethtool_ops = { 717static const struct ethtool_ops qe_ethtool_ops = {
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 99e423a5b9f1..b6eec8cea209 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1167,7 +1167,7 @@ static void print_eth(const u8 *add)
1167static int tc35815_tx_full(struct net_device *dev) 1167static int tc35815_tx_full(struct net_device *dev)
1168{ 1168{
1169 struct tc35815_local *lp = netdev_priv(dev); 1169 struct tc35815_local *lp = netdev_priv(dev);
1170 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); 1170 return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
1171} 1171}
1172 1172
1173static void tc35815_restart(struct net_device *dev) 1173static void tc35815_restart(struct net_device *dev)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 9f6ffffc8376..22720eeabddb 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,10 +69,10 @@
69 69
70#define DRV_MODULE_NAME "tg3" 70#define DRV_MODULE_NAME "tg3"
71#define TG3_MAJ_NUM 3 71#define TG3_MAJ_NUM 3
72#define TG3_MIN_NUM 113 72#define TG3_MIN_NUM 115
73#define DRV_MODULE_VERSION \ 73#define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75#define DRV_MODULE_RELDATE "August 2, 2010" 75#define DRV_MODULE_RELDATE "October 14, 2010"
76 76
77#define TG3_DEF_MAC_MODE 0 77#define TG3_DEF_MAC_MODE 0
78#define TG3_DEF_RX_MODE 0 78#define TG3_DEF_RX_MODE 0
@@ -101,9 +101,15 @@
101 * You can't change the ring sizes, but you can change where you place 101 * You can't change the ring sizes, but you can change where you place
102 * them in the NIC onboard memory. 102 * them in the NIC onboard memory.
103 */ 103 */
104#define TG3_RX_RING_SIZE 512 104#define TG3_RX_STD_RING_SIZE(tp) \
105 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
106 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
107 RX_STD_MAX_SIZE_5717 : 512)
105#define TG3_DEF_RX_RING_PENDING 200 108#define TG3_DEF_RX_RING_PENDING 200
106#define TG3_RX_JUMBO_RING_SIZE 256 109#define TG3_RX_JMB_RING_SIZE(tp) \
110 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
112 1024 : 256)
107#define TG3_DEF_RX_JUMBO_RING_PENDING 100 113#define TG3_DEF_RX_JUMBO_RING_PENDING 100
108#define TG3_RSS_INDIR_TBL_SIZE 128 114#define TG3_RSS_INDIR_TBL_SIZE 128
109 115
@@ -113,19 +119,16 @@
113 * hw multiply/modulo instructions. Another solution would be to 119 * hw multiply/modulo instructions. Another solution would be to
114 * replace things like '% foo' with '& (foo - 1)'. 120 * replace things like '% foo' with '& (foo - 1)'.
115 */ 121 */
116#define TG3_RX_RCB_RING_SIZE(tp) \
117 (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
118 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
119 122
120#define TG3_TX_RING_SIZE 512 123#define TG3_TX_RING_SIZE 512
121#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1) 124#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
122 125
123#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \ 126#define TG3_RX_STD_RING_BYTES(tp) \
124 TG3_RX_RING_SIZE) 127 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
125#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \ 128#define TG3_RX_JMB_RING_BYTES(tp) \
126 TG3_RX_JUMBO_RING_SIZE) 129 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
127#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \ 130#define TG3_RX_RCB_RING_BYTES(tp) \
128 TG3_RX_RCB_RING_SIZE(tp)) 131 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
129#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 132#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
130 TG3_TX_RING_SIZE) 133 TG3_TX_RING_SIZE)
131#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 134#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
@@ -143,11 +146,11 @@
143#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) 146#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
144#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) 147#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
145 148
146#define TG3_RX_STD_BUFF_RING_SIZE \ 149#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
147 (sizeof(struct ring_info) * TG3_RX_RING_SIZE) 150 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
148 151
149#define TG3_RX_JMB_BUFF_RING_SIZE \ 152#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
150 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) 153 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
151 154
152/* Due to a hardware bug, the 5701 can only DMA to memory addresses 155/* Due to a hardware bug, the 5701 can only DMA to memory addresses
153 * that are at least dword aligned when used in PCIX mode. The driver 156 * that are at least dword aligned when used in PCIX mode. The driver
@@ -264,7 +267,6 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, 267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, 268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, 269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, 270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, 271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, 272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
@@ -752,42 +754,6 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
752 HOSTCC_MODE_ENABLE | tnapi->coal_now); 754 HOSTCC_MODE_ENABLE | tnapi->coal_now);
753} 755}
754 756
755static void tg3_napi_disable(struct tg3 *tp)
756{
757 int i;
758
759 for (i = tp->irq_cnt - 1; i >= 0; i--)
760 napi_disable(&tp->napi[i].napi);
761}
762
763static void tg3_napi_enable(struct tg3 *tp)
764{
765 int i;
766
767 for (i = 0; i < tp->irq_cnt; i++)
768 napi_enable(&tp->napi[i].napi);
769}
770
771static inline void tg3_netif_stop(struct tg3 *tp)
772{
773 tp->dev->trans_start = jiffies; /* prevent tx timeout */
774 tg3_napi_disable(tp);
775 netif_tx_disable(tp->dev);
776}
777
778static inline void tg3_netif_start(struct tg3 *tp)
779{
780 /* NOTE: unconditional netif_tx_wake_all_queues is only
781 * appropriate so long as all callers are assured to
782 * have free tx slots (such as after tg3_init_hw)
783 */
784 netif_tx_wake_all_queues(tp->dev);
785
786 tg3_napi_enable(tp);
787 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
788 tg3_enable_ints(tp);
789}
790
791static void tg3_switch_clocks(struct tg3 *tp) 757static void tg3_switch_clocks(struct tg3 *tp)
792{ 758{
793 u32 clock_ctrl; 759 u32 clock_ctrl;
@@ -1196,6 +1162,52 @@ static void tg3_mdio_fini(struct tg3 *tp)
1196 } 1162 }
1197} 1163}
1198 1164
1165static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1166{
1167 int err;
1168
1169 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1170 if (err)
1171 goto done;
1172
1173 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1174 if (err)
1175 goto done;
1176
1177 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1178 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1179 if (err)
1180 goto done;
1181
1182 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1183
1184done:
1185 return err;
1186}
1187
1188static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1189{
1190 int err;
1191
1192 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1193 if (err)
1194 goto done;
1195
1196 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1197 if (err)
1198 goto done;
1199
1200 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1201 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1202 if (err)
1203 goto done;
1204
1205 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1206
1207done:
1208 return err;
1209}
1210
1199/* tp->lock is held. */ 1211/* tp->lock is held. */
1200static inline void tg3_generate_fw_event(struct tg3 *tp) 1212static inline void tg3_generate_fw_event(struct tg3 *tp)
1201{ 1213{
@@ -1572,6 +1584,17 @@ static void tg3_phy_fini(struct tg3 *tp)
1572 } 1584 }
1573} 1585}
1574 1586
1587static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1588{
1589 int err;
1590
1591 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1592 if (!err)
1593 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1594
1595 return err;
1596}
1597
1575static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val) 1598static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1576{ 1599{
1577 int err; 1600 int err;
@@ -1735,6 +1758,42 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
1735 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy); 1758 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1736} 1759}
1737 1760
1761static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1762{
1763 u32 val;
1764
1765 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1766 return;
1767
1768 tp->setlpicnt = 0;
1769
1770 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1771 current_link_up == 1 &&
1772 (tp->link_config.active_speed == SPEED_1000 ||
1773 (tp->link_config.active_speed == SPEED_100 &&
1774 tp->link_config.active_duplex == DUPLEX_FULL))) {
1775 u32 eeectl;
1776
1777 if (tp->link_config.active_speed == SPEED_1000)
1778 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1779 else
1780 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1781
1782 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1783
1784 tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val);
1785
1786 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1787 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1788 tp->setlpicnt = 2;
1789 }
1790
1791 if (!tp->setlpicnt) {
1792 val = tr32(TG3_CPMU_EEE_MODE);
1793 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1794 }
1795}
1796
1738static int tg3_wait_macro_done(struct tg3 *tp) 1797static int tg3_wait_macro_done(struct tg3 *tp)
1739{ 1798{
1740 int limit = 100; 1799 int limit = 100;
@@ -1917,19 +1976,16 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1917 */ 1976 */
1918static int tg3_phy_reset(struct tg3 *tp) 1977static int tg3_phy_reset(struct tg3 *tp)
1919{ 1978{
1920 u32 cpmuctrl; 1979 u32 val, cpmuctrl;
1921 u32 phy_status;
1922 int err; 1980 int err;
1923 1981
1924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 1982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1925 u32 val;
1926
1927 val = tr32(GRC_MISC_CFG); 1983 val = tr32(GRC_MISC_CFG);
1928 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ); 1984 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1929 udelay(40); 1985 udelay(40);
1930 } 1986 }
1931 err = tg3_readphy(tp, MII_BMSR, &phy_status); 1987 err = tg3_readphy(tp, MII_BMSR, &val);
1932 err |= tg3_readphy(tp, MII_BMSR, &phy_status); 1988 err |= tg3_readphy(tp, MII_BMSR, &val);
1933 if (err != 0) 1989 if (err != 0)
1934 return -EBUSY; 1990 return -EBUSY;
1935 1991
@@ -1961,18 +2017,14 @@ static int tg3_phy_reset(struct tg3 *tp)
1961 return err; 2017 return err;
1962 2018
1963 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) { 2019 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1964 u32 phy; 2020 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1965 2021 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
1966 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1967 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1968 2022
1969 tw32(TG3_CPMU_CTRL, cpmuctrl); 2023 tw32(TG3_CPMU_CTRL, cpmuctrl);
1970 } 2024 }
1971 2025
1972 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX || 2026 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1973 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) { 2027 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1974 u32 val;
1975
1976 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2028 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1977 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) == 2029 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1978 CPMU_LSPD_1000MB_MACCLK_12_5) { 2030 CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2028,23 +2080,19 @@ out:
2028 /* Cannot do read-modify-write on 5401 */ 2080 /* Cannot do read-modify-write on 5401 */
2029 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20); 2081 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2030 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 2082 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2031 u32 phy_reg;
2032
2033 /* Set bit 14 with read-modify-write to preserve other bits */ 2083 /* Set bit 14 with read-modify-write to preserve other bits */
2034 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) && 2084 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2035 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg)) 2085 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
2036 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000); 2086 tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
2037 } 2087 }
2038 2088
2039 /* Set phy register 0x10 bit 0 to high fifo elasticity to support 2089 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2040 * jumbo frames transmission. 2090 * jumbo frames transmission.
2041 */ 2091 */
2042 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 2092 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2043 u32 phy_reg; 2093 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2044
2045 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2046 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2094 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2047 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2095 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2048 } 2096 }
2049 2097
2050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -2920,6 +2968,44 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2920 tg3_writephy(tp, MII_TG3_CTRL, new_adv); 2968 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2921 } 2969 }
2922 2970
2971 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2972 u32 val = 0;
2973
2974 tw32(TG3_CPMU_EEE_MODE,
2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2976
2977 /* Enable SM_DSP clock and tx 6dB coding. */
2978 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
2979 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
2980 MII_TG3_AUXCTL_ACTL_TX_6DB;
2981 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2982
2983 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2985 !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
2987 val | MII_TG3_DSP_CH34TP2_HIBW01);
2988
2989 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2990 /* Advertise 100-BaseTX EEE ability */
2991 if (tp->link_config.advertising &
2992 (ADVERTISED_100baseT_Half |
2993 ADVERTISED_100baseT_Full))
2994 val |= TG3_CL45_D7_EEEADV_CAP_100TX;
2995 /* Advertise 1000-BaseT EEE ability */
2996 if (tp->link_config.advertising &
2997 (ADVERTISED_1000baseT_Half |
2998 ADVERTISED_1000baseT_Full))
2999 val |= TG3_CL45_D7_EEEADV_CAP_1000T;
3000 }
3001 tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val);
3002
3003 /* Turn off SM_DSP clock. */
3004 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
3005 MII_TG3_AUXCTL_ACTL_TX_6DB;
3006 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3007 }
3008
2923 if (tp->link_config.autoneg == AUTONEG_DISABLE && 3009 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2924 tp->link_config.speed != SPEED_INVALID) { 3010 tp->link_config.speed != SPEED_INVALID) {
2925 u32 bmcr, orig_bmcr; 3011 u32 bmcr, orig_bmcr;
@@ -3060,7 +3146,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3060static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) 3146static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3061{ 3147{
3062 int current_link_up; 3148 int current_link_up;
3063 u32 bmsr, dummy; 3149 u32 bmsr, val;
3064 u32 lcl_adv, rmt_adv; 3150 u32 lcl_adv, rmt_adv;
3065 u16 current_speed; 3151 u16 current_speed;
3066 u8 current_duplex; 3152 u8 current_duplex;
@@ -3140,8 +3226,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3140 } 3226 }
3141 3227
3142 /* Clear pending interrupts... */ 3228 /* Clear pending interrupts... */
3143 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3229 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3144 tg3_readphy(tp, MII_TG3_ISTAT, &dummy); 3230 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3145 3231
3146 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) 3232 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3147 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); 3233 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
@@ -3162,8 +3248,6 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3162 current_duplex = DUPLEX_INVALID; 3248 current_duplex = DUPLEX_INVALID;
3163 3249
3164 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { 3250 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3165 u32 val;
3166
3167 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007); 3251 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3168 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val); 3252 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3169 if (!(val & (1 << 10))) { 3253 if (!(val & (1 << 10))) {
@@ -3238,13 +3322,11 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3238 3322
3239relink: 3323relink:
3240 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { 3324 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3241 u32 tmp;
3242
3243 tg3_phy_copper_begin(tp); 3325 tg3_phy_copper_begin(tp);
3244 3326
3245 tg3_readphy(tp, MII_BMSR, &tmp); 3327 tg3_readphy(tp, MII_BMSR, &bmsr);
3246 if (!tg3_readphy(tp, MII_BMSR, &tmp) && 3328 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3247 (tmp & BMSR_LSTATUS)) 3329 (bmsr & BMSR_LSTATUS))
3248 current_link_up = 1; 3330 current_link_up = 1;
3249 } 3331 }
3250 3332
@@ -3285,6 +3367,8 @@ relink:
3285 tw32_f(MAC_MODE, tp->mac_mode); 3367 tw32_f(MAC_MODE, tp->mac_mode);
3286 udelay(40); 3368 udelay(40);
3287 3369
3370 tg3_phy_eee_adjust(tp, current_link_up);
3371
3288 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 3372 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3289 /* Polled via timer. */ 3373 /* Polled via timer. */
3290 tw32_f(MAC_EVENT, 0); 3374 tw32_f(MAC_EVENT, 0);
@@ -4353,6 +4437,11 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4353 return err; 4437 return err;
4354} 4438}
4355 4439
4440static inline int tg3_irq_sync(struct tg3 *tp)
4441{
4442 return tp->irq_sync;
4443}
4444
4356/* This is called whenever we suspect that the system chipset is re- 4445/* This is called whenever we suspect that the system chipset is re-
4357 * ordering the sequence of MMIO to the tx send mailbox. The symptom 4446 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4358 * is bogus tx completions. We try to recover by setting the 4447 * is bogus tx completions. We try to recover by setting the
@@ -4484,22 +4573,21 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4484 u32 opaque_key, u32 dest_idx_unmasked) 4573 u32 opaque_key, u32 dest_idx_unmasked)
4485{ 4574{
4486 struct tg3_rx_buffer_desc *desc; 4575 struct tg3_rx_buffer_desc *desc;
4487 struct ring_info *map, *src_map; 4576 struct ring_info *map;
4488 struct sk_buff *skb; 4577 struct sk_buff *skb;
4489 dma_addr_t mapping; 4578 dma_addr_t mapping;
4490 int skb_size, dest_idx; 4579 int skb_size, dest_idx;
4491 4580
4492 src_map = NULL;
4493 switch (opaque_key) { 4581 switch (opaque_key) {
4494 case RXD_OPAQUE_RING_STD: 4582 case RXD_OPAQUE_RING_STD:
4495 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4583 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4496 desc = &tpr->rx_std[dest_idx]; 4584 desc = &tpr->rx_std[dest_idx];
4497 map = &tpr->rx_std_buffers[dest_idx]; 4585 map = &tpr->rx_std_buffers[dest_idx];
4498 skb_size = tp->rx_pkt_map_sz; 4586 skb_size = tp->rx_pkt_map_sz;
4499 break; 4587 break;
4500 4588
4501 case RXD_OPAQUE_RING_JUMBO: 4589 case RXD_OPAQUE_RING_JUMBO:
4502 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4590 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4503 desc = &tpr->rx_jmb[dest_idx].std; 4591 desc = &tpr->rx_jmb[dest_idx].std;
4504 map = &tpr->rx_jmb_buffers[dest_idx]; 4592 map = &tpr->rx_jmb_buffers[dest_idx];
4505 skb_size = TG3_RX_JMB_MAP_SZ; 4593 skb_size = TG3_RX_JMB_MAP_SZ;
@@ -4549,12 +4637,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4549 struct tg3 *tp = tnapi->tp; 4637 struct tg3 *tp = tnapi->tp;
4550 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4638 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4551 struct ring_info *src_map, *dest_map; 4639 struct ring_info *src_map, *dest_map;
4552 struct tg3_rx_prodring_set *spr = &tp->prodring[0]; 4640 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4553 int dest_idx; 4641 int dest_idx;
4554 4642
4555 switch (opaque_key) { 4643 switch (opaque_key) {
4556 case RXD_OPAQUE_RING_STD: 4644 case RXD_OPAQUE_RING_STD:
4557 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4645 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4558 dest_desc = &dpr->rx_std[dest_idx]; 4646 dest_desc = &dpr->rx_std[dest_idx];
4559 dest_map = &dpr->rx_std_buffers[dest_idx]; 4647 dest_map = &dpr->rx_std_buffers[dest_idx];
4560 src_desc = &spr->rx_std[src_idx]; 4648 src_desc = &spr->rx_std[src_idx];
@@ -4562,7 +4650,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4562 break; 4650 break;
4563 4651
4564 case RXD_OPAQUE_RING_JUMBO: 4652 case RXD_OPAQUE_RING_JUMBO:
4565 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4653 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4566 dest_desc = &dpr->rx_jmb[dest_idx].std; 4654 dest_desc = &dpr->rx_jmb[dest_idx].std;
4567 dest_map = &dpr->rx_jmb_buffers[dest_idx]; 4655 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4568 src_desc = &spr->rx_jmb[src_idx].std; 4656 src_desc = &spr->rx_jmb[src_idx].std;
@@ -4619,7 +4707,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4619 u32 sw_idx = tnapi->rx_rcb_ptr; 4707 u32 sw_idx = tnapi->rx_rcb_ptr;
4620 u16 hw_idx; 4708 u16 hw_idx;
4621 int received; 4709 int received;
4622 struct tg3_rx_prodring_set *tpr = tnapi->prodring; 4710 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4623 4711
4624 hw_idx = *(tnapi->rx_rcb_prod_idx); 4712 hw_idx = *(tnapi->rx_rcb_prod_idx);
4625 /* 4713 /*
@@ -4644,13 +4732,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4644 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4732 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4645 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4733 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4646 if (opaque_key == RXD_OPAQUE_RING_STD) { 4734 if (opaque_key == RXD_OPAQUE_RING_STD) {
4647 ri = &tp->prodring[0].rx_std_buffers[desc_idx]; 4735 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4648 dma_addr = dma_unmap_addr(ri, mapping); 4736 dma_addr = dma_unmap_addr(ri, mapping);
4649 skb = ri->skb; 4737 skb = ri->skb;
4650 post_ptr = &std_prod_idx; 4738 post_ptr = &std_prod_idx;
4651 rx_std_posted++; 4739 rx_std_posted++;
4652 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4740 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4653 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; 4741 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4654 dma_addr = dma_unmap_addr(ri, mapping); 4742 dma_addr = dma_unmap_addr(ri, mapping);
4655 skb = ri->skb; 4743 skb = ri->skb;
4656 post_ptr = &jmb_prod_idx; 4744 post_ptr = &jmb_prod_idx;
@@ -4762,7 +4850,8 @@ next_pkt:
4762 (*post_ptr)++; 4850 (*post_ptr)++;
4763 4851
4764 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4852 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4765 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4853 tpr->rx_std_prod_idx = std_prod_idx &
4854 tp->rx_std_ring_mask;
4766 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 4855 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4767 tpr->rx_std_prod_idx); 4856 tpr->rx_std_prod_idx);
4768 work_mask &= ~RXD_OPAQUE_RING_STD; 4857 work_mask &= ~RXD_OPAQUE_RING_STD;
@@ -4770,7 +4859,7 @@ next_pkt:
4770 } 4859 }
4771next_pkt_nopost: 4860next_pkt_nopost:
4772 sw_idx++; 4861 sw_idx++;
4773 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); 4862 sw_idx &= tp->rx_ret_ring_mask;
4774 4863
4775 /* Refresh hw_idx to see if there is new work */ 4864 /* Refresh hw_idx to see if there is new work */
4776 if (sw_idx == hw_idx) { 4865 if (sw_idx == hw_idx) {
@@ -4786,13 +4875,14 @@ next_pkt_nopost:
4786 /* Refill RX ring(s). */ 4875 /* Refill RX ring(s). */
4787 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) { 4876 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4788 if (work_mask & RXD_OPAQUE_RING_STD) { 4877 if (work_mask & RXD_OPAQUE_RING_STD) {
4789 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4878 tpr->rx_std_prod_idx = std_prod_idx &
4879 tp->rx_std_ring_mask;
4790 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 4880 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4791 tpr->rx_std_prod_idx); 4881 tpr->rx_std_prod_idx);
4792 } 4882 }
4793 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4883 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4794 tpr->rx_jmb_prod_idx = jmb_prod_idx % 4884 tpr->rx_jmb_prod_idx = jmb_prod_idx &
4795 TG3_RX_JUMBO_RING_SIZE; 4885 tp->rx_jmb_ring_mask;
4796 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 4886 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4797 tpr->rx_jmb_prod_idx); 4887 tpr->rx_jmb_prod_idx);
4798 } 4888 }
@@ -4803,8 +4893,8 @@ next_pkt_nopost:
4803 */ 4893 */
4804 smp_wmb(); 4894 smp_wmb();
4805 4895
4806 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE; 4896 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
4807 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE; 4897 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
4808 4898
4809 if (tnapi != &tp->napi[1]) 4899 if (tnapi != &tp->napi[1])
4810 napi_schedule(&tp->napi[1].napi); 4900 napi_schedule(&tp->napi[1].napi);
@@ -4860,9 +4950,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
4860 if (spr->rx_std_cons_idx < src_prod_idx) 4950 if (spr->rx_std_cons_idx < src_prod_idx)
4861 cpycnt = src_prod_idx - spr->rx_std_cons_idx; 4951 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4862 else 4952 else
4863 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx; 4953 cpycnt = tp->rx_std_ring_mask + 1 -
4954 spr->rx_std_cons_idx;
4864 4955
4865 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx); 4956 cpycnt = min(cpycnt,
4957 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
4866 4958
4867 si = spr->rx_std_cons_idx; 4959 si = spr->rx_std_cons_idx;
4868 di = dpr->rx_std_prod_idx; 4960 di = dpr->rx_std_prod_idx;
@@ -4896,10 +4988,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
4896 dbd->addr_lo = sbd->addr_lo; 4988 dbd->addr_lo = sbd->addr_lo;
4897 } 4989 }
4898 4990
4899 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) % 4991 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
4900 TG3_RX_RING_SIZE; 4992 tp->rx_std_ring_mask;
4901 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) % 4993 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
4902 TG3_RX_RING_SIZE; 4994 tp->rx_std_ring_mask;
4903 } 4995 }
4904 4996
4905 while (1) { 4997 while (1) {
@@ -4916,10 +5008,11 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
4916 if (spr->rx_jmb_cons_idx < src_prod_idx) 5008 if (spr->rx_jmb_cons_idx < src_prod_idx)
4917 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; 5009 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4918 else 5010 else
4919 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx; 5011 cpycnt = tp->rx_jmb_ring_mask + 1 -
5012 spr->rx_jmb_cons_idx;
4920 5013
4921 cpycnt = min(cpycnt, 5014 cpycnt = min(cpycnt,
4922 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx); 5015 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
4923 5016
4924 si = spr->rx_jmb_cons_idx; 5017 si = spr->rx_jmb_cons_idx;
4925 di = dpr->rx_jmb_prod_idx; 5018 di = dpr->rx_jmb_prod_idx;
@@ -4953,10 +5046,10 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
4953 dbd->addr_lo = sbd->addr_lo; 5046 dbd->addr_lo = sbd->addr_lo;
4954 } 5047 }
4955 5048
4956 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) % 5049 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
4957 TG3_RX_JUMBO_RING_SIZE; 5050 tp->rx_jmb_ring_mask;
4958 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) % 5051 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
4959 TG3_RX_JUMBO_RING_SIZE; 5052 tp->rx_jmb_ring_mask;
4960 } 5053 }
4961 5054
4962 return err; 5055 return err;
@@ -4981,14 +5074,14 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4981 work_done += tg3_rx(tnapi, budget - work_done); 5074 work_done += tg3_rx(tnapi, budget - work_done);
4982 5075
4983 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) { 5076 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4984 struct tg3_rx_prodring_set *dpr = &tp->prodring[0]; 5077 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
4985 int i, err = 0; 5078 int i, err = 0;
4986 u32 std_prod_idx = dpr->rx_std_prod_idx; 5079 u32 std_prod_idx = dpr->rx_std_prod_idx;
4987 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; 5080 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4988 5081
4989 for (i = 1; i < tp->irq_cnt; i++) 5082 for (i = 1; i < tp->irq_cnt; i++)
4990 err |= tg3_rx_prodring_xfer(tp, dpr, 5083 err |= tg3_rx_prodring_xfer(tp, dpr,
4991 tp->napi[i].prodring); 5084 &tp->napi[i].prodring);
4992 5085
4993 wmb(); 5086 wmb();
4994 5087
@@ -5098,6 +5191,59 @@ tx_recovery:
5098 return work_done; 5191 return work_done;
5099} 5192}
5100 5193
5194static void tg3_napi_disable(struct tg3 *tp)
5195{
5196 int i;
5197
5198 for (i = tp->irq_cnt - 1; i >= 0; i--)
5199 napi_disable(&tp->napi[i].napi);
5200}
5201
5202static void tg3_napi_enable(struct tg3 *tp)
5203{
5204 int i;
5205
5206 for (i = 0; i < tp->irq_cnt; i++)
5207 napi_enable(&tp->napi[i].napi);
5208}
5209
5210static void tg3_napi_init(struct tg3 *tp)
5211{
5212 int i;
5213
5214 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5215 for (i = 1; i < tp->irq_cnt; i++)
5216 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5217}
5218
5219static void tg3_napi_fini(struct tg3 *tp)
5220{
5221 int i;
5222
5223 for (i = 0; i < tp->irq_cnt; i++)
5224 netif_napi_del(&tp->napi[i].napi);
5225}
5226
5227static inline void tg3_netif_stop(struct tg3 *tp)
5228{
5229 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5230 tg3_napi_disable(tp);
5231 netif_tx_disable(tp->dev);
5232}
5233
5234static inline void tg3_netif_start(struct tg3 *tp)
5235{
5236 /* NOTE: unconditional netif_tx_wake_all_queues is only
5237 * appropriate so long as all callers are assured to
5238 * have free tx slots (such as after tg3_init_hw)
5239 */
5240 netif_tx_wake_all_queues(tp->dev);
5241
5242 tg3_napi_enable(tp);
5243 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5244 tg3_enable_ints(tp);
5245}
5246
5101static void tg3_irq_quiesce(struct tg3 *tp) 5247static void tg3_irq_quiesce(struct tg3 *tp)
5102{ 5248{
5103 int i; 5249 int i;
@@ -5111,11 +5257,6 @@ static void tg3_irq_quiesce(struct tg3 *tp)
5111 synchronize_irq(tp->napi[i].irq_vec); 5257 synchronize_irq(tp->napi[i].irq_vec);
5112} 5258}
5113 5259
5114static inline int tg3_irq_sync(struct tg3 *tp)
5115{
5116 return tp->irq_sync;
5117}
5118
5119/* Fully shutdown all tg3 driver activity elsewhere in the system. 5260/* Fully shutdown all tg3 driver activity elsewhere in the system.
5120 * If irq_sync is non-zero, then the IRQ handler must be synchronized 5261 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5121 * with as well. Most of the time, this is not necessary except when 5262 * with as well. Most of the time, this is not necessary except when
@@ -5404,8 +5545,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5404{ 5545{
5405 u32 base = (u32) mapping & 0xffffffff; 5546 u32 base = (u32) mapping & 0xffffffff;
5406 5547
5407 return ((base > 0xffffdcc0) && 5548 return (base > 0xffffdcc0) && (base + len + 8 < base);
5408 (base + len + 8 < base));
5409} 5549}
5410 5550
5411/* Test for DMA addresses > 40-bit */ 5551/* Test for DMA addresses > 40-bit */
@@ -5414,7 +5554,7 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5414{ 5554{
5415#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) 5555#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5416 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) 5556 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5417 return (((u64) mapping + len) > DMA_BIT_MASK(40)); 5557 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5418 return 0; 5558 return 0;
5419#else 5559#else
5420 return 0; 5560 return 0;
@@ -5574,9 +5714,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5574 goto out_unlock; 5714 goto out_unlock;
5575 } 5715 }
5576 5716
5577 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 5717 if (skb_is_gso_v6(skb)) {
5578 hdrlen = skb_headlen(skb) - ETH_HLEN; 5718 hdrlen = skb_headlen(skb) - ETH_HLEN;
5579 else { 5719 } else {
5580 struct iphdr *iph = ip_hdr(skb); 5720 struct iphdr *iph = ip_hdr(skb);
5581 5721
5582 tcp_opt_len = tcp_optlen(skb); 5722 tcp_opt_len = tcp_optlen(skb);
@@ -5605,7 +5745,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5605 } 5745 }
5606 5746
5607#if TG3_VLAN_TAG_USED 5747#if TG3_VLAN_TAG_USED
5608 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 5748 if (vlan_tx_tag_present(skb))
5609 base_flags |= (TXD_FLAG_VLAN | 5749 base_flags |= (TXD_FLAG_VLAN |
5610 (vlan_tx_tag_get(skb) << 16)); 5750 (vlan_tx_tag_get(skb) << 16));
5611#endif 5751#endif
@@ -5798,7 +5938,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5798 iph = ip_hdr(skb); 5938 iph = ip_hdr(skb);
5799 tcp_opt_len = tcp_optlen(skb); 5939 tcp_opt_len = tcp_optlen(skb);
5800 5940
5801 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 5941 if (skb_is_gso_v6(skb)) {
5802 hdr_len = skb_headlen(skb) - ETH_HLEN; 5942 hdr_len = skb_headlen(skb) - ETH_HLEN;
5803 } else { 5943 } else {
5804 u32 ip_tcp_len; 5944 u32 ip_tcp_len;
@@ -5851,7 +5991,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5851 } 5991 }
5852 } 5992 }
5853#if TG3_VLAN_TAG_USED 5993#if TG3_VLAN_TAG_USED
5854 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 5994 if (vlan_tx_tag_present(skb))
5855 base_flags |= (TXD_FLAG_VLAN | 5995 base_flags |= (TXD_FLAG_VLAN |
5856 (vlan_tx_tag_get(skb) << 16)); 5996 (vlan_tx_tag_get(skb) << 16));
5857#endif 5997#endif
@@ -6057,16 +6197,16 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6057{ 6197{
6058 int i; 6198 int i;
6059 6199
6060 if (tpr != &tp->prodring[0]) { 6200 if (tpr != &tp->napi[0].prodring) {
6061 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; 6201 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6062 i = (i + 1) % TG3_RX_RING_SIZE) 6202 i = (i + 1) & tp->rx_std_ring_mask)
6063 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6203 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6064 tp->rx_pkt_map_sz); 6204 tp->rx_pkt_map_sz);
6065 6205
6066 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6206 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6067 for (i = tpr->rx_jmb_cons_idx; 6207 for (i = tpr->rx_jmb_cons_idx;
6068 i != tpr->rx_jmb_prod_idx; 6208 i != tpr->rx_jmb_prod_idx;
6069 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) { 6209 i = (i + 1) & tp->rx_jmb_ring_mask) {
6070 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], 6210 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6071 TG3_RX_JMB_MAP_SZ); 6211 TG3_RX_JMB_MAP_SZ);
6072 } 6212 }
@@ -6075,12 +6215,13 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6075 return; 6215 return;
6076 } 6216 }
6077 6217
6078 for (i = 0; i < TG3_RX_RING_SIZE; i++) 6218 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6079 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i], 6219 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6080 tp->rx_pkt_map_sz); 6220 tp->rx_pkt_map_sz);
6081 6221
6082 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6222 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6083 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) 6223 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6224 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6084 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i], 6225 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6085 TG3_RX_JMB_MAP_SZ); 6226 TG3_RX_JMB_MAP_SZ);
6086 } 6227 }
@@ -6103,16 +6244,17 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6103 tpr->rx_jmb_cons_idx = 0; 6244 tpr->rx_jmb_cons_idx = 0;
6104 tpr->rx_jmb_prod_idx = 0; 6245 tpr->rx_jmb_prod_idx = 0;
6105 6246
6106 if (tpr != &tp->prodring[0]) { 6247 if (tpr != &tp->napi[0].prodring) {
6107 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE); 6248 memset(&tpr->rx_std_buffers[0], 0,
6108 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) 6249 TG3_RX_STD_BUFF_RING_SIZE(tp));
6250 if (tpr->rx_jmb_buffers)
6109 memset(&tpr->rx_jmb_buffers[0], 0, 6251 memset(&tpr->rx_jmb_buffers[0], 0,
6110 TG3_RX_JMB_BUFF_RING_SIZE); 6252 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6111 goto done; 6253 goto done;
6112 } 6254 }
6113 6255
6114 /* Zero out all descriptors. */ 6256 /* Zero out all descriptors. */
6115 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); 6257 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6116 6258
6117 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; 6259 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6118 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) && 6260 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
@@ -6124,7 +6266,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6124 * stuff once. This works because the card does not 6266 * stuff once. This works because the card does not
6125 * write into the rx buffer posting rings. 6267 * write into the rx buffer posting rings.
6126 */ 6268 */
6127 for (i = 0; i < TG3_RX_RING_SIZE; i++) { 6269 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6128 struct tg3_rx_buffer_desc *rxd; 6270 struct tg3_rx_buffer_desc *rxd;
6129 6271
6130 rxd = &tpr->rx_std[i]; 6272 rxd = &tpr->rx_std[i];
@@ -6148,15 +6290,16 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6148 } 6290 }
6149 } 6291 }
6150 6292
6151 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)) 6293 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6294 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6152 goto done; 6295 goto done;
6153 6296
6154 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES); 6297 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6155 6298
6156 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)) 6299 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6157 goto done; 6300 goto done;
6158 6301
6159 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 6302 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6160 struct tg3_rx_buffer_desc *rxd; 6303 struct tg3_rx_buffer_desc *rxd;
6161 6304
6162 rxd = &tpr->rx_jmb[i].std; 6305 rxd = &tpr->rx_jmb[i].std;
@@ -6196,12 +6339,12 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
6196 kfree(tpr->rx_jmb_buffers); 6339 kfree(tpr->rx_jmb_buffers);
6197 tpr->rx_jmb_buffers = NULL; 6340 tpr->rx_jmb_buffers = NULL;
6198 if (tpr->rx_std) { 6341 if (tpr->rx_std) {
6199 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 6342 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
6200 tpr->rx_std, tpr->rx_std_mapping); 6343 tpr->rx_std, tpr->rx_std_mapping);
6201 tpr->rx_std = NULL; 6344 tpr->rx_std = NULL;
6202 } 6345 }
6203 if (tpr->rx_jmb) { 6346 if (tpr->rx_jmb) {
6204 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES, 6347 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
6205 tpr->rx_jmb, tpr->rx_jmb_mapping); 6348 tpr->rx_jmb, tpr->rx_jmb_mapping);
6206 tpr->rx_jmb = NULL; 6349 tpr->rx_jmb = NULL;
6207 } 6350 }
@@ -6210,23 +6353,25 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
6210static int tg3_rx_prodring_init(struct tg3 *tp, 6353static int tg3_rx_prodring_init(struct tg3 *tp,
6211 struct tg3_rx_prodring_set *tpr) 6354 struct tg3_rx_prodring_set *tpr)
6212{ 6355{
6213 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL); 6356 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6357 GFP_KERNEL);
6214 if (!tpr->rx_std_buffers) 6358 if (!tpr->rx_std_buffers)
6215 return -ENOMEM; 6359 return -ENOMEM;
6216 6360
6217 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES, 6361 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
6218 &tpr->rx_std_mapping); 6362 &tpr->rx_std_mapping);
6219 if (!tpr->rx_std) 6363 if (!tpr->rx_std)
6220 goto err_out; 6364 goto err_out;
6221 6365
6222 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6366 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6223 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE, 6367 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6368 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6224 GFP_KERNEL); 6369 GFP_KERNEL);
6225 if (!tpr->rx_jmb_buffers) 6370 if (!tpr->rx_jmb_buffers)
6226 goto err_out; 6371 goto err_out;
6227 6372
6228 tpr->rx_jmb = pci_alloc_consistent(tp->pdev, 6373 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6229 TG3_RX_JUMBO_RING_BYTES, 6374 TG3_RX_JMB_RING_BYTES(tp),
6230 &tpr->rx_jmb_mapping); 6375 &tpr->rx_jmb_mapping);
6231 if (!tpr->rx_jmb) 6376 if (!tpr->rx_jmb)
6232 goto err_out; 6377 goto err_out;
@@ -6253,7 +6398,7 @@ static void tg3_free_rings(struct tg3 *tp)
6253 for (j = 0; j < tp->irq_cnt; j++) { 6398 for (j = 0; j < tp->irq_cnt; j++) {
6254 struct tg3_napi *tnapi = &tp->napi[j]; 6399 struct tg3_napi *tnapi = &tp->napi[j];
6255 6400
6256 tg3_rx_prodring_free(tp, &tp->prodring[j]); 6401 tg3_rx_prodring_free(tp, &tnapi->prodring);
6257 6402
6258 if (!tnapi->tx_buffers) 6403 if (!tnapi->tx_buffers)
6259 continue; 6404 continue;
@@ -6325,7 +6470,7 @@ static int tg3_init_rings(struct tg3 *tp)
6325 if (tnapi->rx_rcb) 6470 if (tnapi->rx_rcb)
6326 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6471 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6327 6472
6328 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) { 6473 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6329 tg3_free_rings(tp); 6474 tg3_free_rings(tp);
6330 return -ENOMEM; 6475 return -ENOMEM;
6331 } 6476 }
@@ -6361,6 +6506,8 @@ static void tg3_free_consistent(struct tg3 *tp)
6361 tnapi->rx_rcb = NULL; 6506 tnapi->rx_rcb = NULL;
6362 } 6507 }
6363 6508
6509 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6510
6364 if (tnapi->hw_status) { 6511 if (tnapi->hw_status) {
6365 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6512 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6366 tnapi->hw_status, 6513 tnapi->hw_status,
@@ -6374,9 +6521,6 @@ static void tg3_free_consistent(struct tg3 *tp)
6374 tp->hw_stats, tp->stats_mapping); 6521 tp->hw_stats, tp->stats_mapping);
6375 tp->hw_stats = NULL; 6522 tp->hw_stats = NULL;
6376 } 6523 }
6377
6378 for (i = 0; i < tp->irq_cnt; i++)
6379 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6380} 6524}
6381 6525
6382/* 6526/*
@@ -6387,11 +6531,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6387{ 6531{
6388 int i; 6532 int i;
6389 6533
6390 for (i = 0; i < tp->irq_cnt; i++) {
6391 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6392 goto err_out;
6393 }
6394
6395 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6534 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6396 sizeof(struct tg3_hw_stats), 6535 sizeof(struct tg3_hw_stats),
6397 &tp->stats_mapping); 6536 &tp->stats_mapping);
@@ -6413,6 +6552,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6413 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); 6552 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6414 sblk = tnapi->hw_status; 6553 sblk = tnapi->hw_status;
6415 6554
6555 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6556 goto err_out;
6557
6416 /* If multivector TSS is enabled, vector 0 does not handle 6558 /* If multivector TSS is enabled, vector 0 does not handle
6417 * tx interrupts. Don't allocate any resources for it. 6559 * tx interrupts. Don't allocate any resources for it.
6418 */ 6560 */
@@ -6452,8 +6594,6 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6452 break; 6594 break;
6453 } 6595 }
6454 6596
6455 tnapi->prodring = &tp->prodring[i];
6456
6457 /* 6597 /*
6458 * If multivector RSS is enabled, vector 0 does not handle 6598 * If multivector RSS is enabled, vector 0 does not handle
6459 * rx or tx interrupts. Don't allocate any resources for it. 6599 * rx or tx interrupts. Don't allocate any resources for it.
@@ -6596,6 +6736,10 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6596 int i; 6736 int i;
6597 u32 apedata; 6737 u32 apedata;
6598 6738
6739 /* NCSI does not support APE events */
6740 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6741 return;
6742
6599 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG); 6743 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6600 if (apedata != APE_SEG_SIG_MAGIC) 6744 if (apedata != APE_SEG_SIG_MAGIC)
6601 return; 6745 return;
@@ -6647,6 +6791,8 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6647 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM)); 6791 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6648 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, 6792 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6649 APE_HOST_BEHAV_NO_PHYLOCK); 6793 APE_HOST_BEHAV_NO_PHYLOCK);
6794 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6795 TG3_APE_HOST_DRVR_STATE_START);
6650 6796
6651 event = APE_EVENT_STATUS_STATE_START; 6797 event = APE_EVENT_STATUS_STATE_START;
6652 break; 6798 break;
@@ -6658,6 +6804,16 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6658 */ 6804 */
6659 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); 6805 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6660 6806
6807 if (device_may_wakeup(&tp->pdev->dev) &&
6808 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6809 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6810 TG3_APE_HOST_WOL_SPEED_AUTO);
6811 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6812 } else
6813 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6814
6815 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6816
6661 event = APE_EVENT_STATUS_STATE_UNLOAD; 6817 event = APE_EVENT_STATUS_STATE_UNLOAD;
6662 break; 6818 break;
6663 case RESET_KIND_SUSPEND: 6819 case RESET_KIND_SUSPEND:
@@ -7515,6 +7671,9 @@ static void tg3_rings_reset(struct tg3 *tp)
7515 /* Disable all transmit rings but the first. */ 7671 /* Disable all transmit rings but the first. */
7516 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7672 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7517 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16; 7673 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7674 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7676 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7518 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7677 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7519 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2; 7678 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7520 else 7679 else
@@ -7548,7 +7707,7 @@ static void tg3_rings_reset(struct tg3 *tp)
7548 7707
7549 /* Zero mailbox registers. */ 7708 /* Zero mailbox registers. */
7550 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) { 7709 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7551 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) { 7710 for (i = 1; i < tp->irq_max; i++) {
7552 tp->napi[i].tx_prod = 0; 7711 tp->napi[i].tx_prod = 0;
7553 tp->napi[i].tx_cons = 0; 7712 tp->napi[i].tx_cons = 0;
7554 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 7713 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -7594,8 +7753,8 @@ static void tg3_rings_reset(struct tg3 *tp)
7594 7753
7595 if (tnapi->rx_rcb) { 7754 if (tnapi->rx_rcb) {
7596 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 7755 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7597 (TG3_RX_RCB_RING_SIZE(tp) << 7756 (tp->rx_ret_ring_mask + 1) <<
7598 BDINFO_FLAGS_MAXLEN_SHIFT), 0); 7757 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7599 rxrcb += TG3_BDINFO_SIZE; 7758 rxrcb += TG3_BDINFO_SIZE;
7600 } 7759 }
7601 7760
@@ -7618,7 +7777,7 @@ static void tg3_rings_reset(struct tg3 *tp)
7618 } 7777 }
7619 7778
7620 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, 7779 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7621 (TG3_RX_RCB_RING_SIZE(tp) << 7780 ((tp->rx_ret_ring_mask + 1) <<
7622 BDINFO_FLAGS_MAXLEN_SHIFT), 0); 7781 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7623 7782
7624 stblk += 8; 7783 stblk += 8;
@@ -7631,7 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7631{ 7790{
7632 u32 val, rdmac_mode; 7791 u32 val, rdmac_mode;
7633 int i, err, limit; 7792 int i, err, limit;
7634 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 7793 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7635 7794
7636 tg3_disable_ints(tp); 7795 tg3_disable_ints(tp);
7637 7796
@@ -7720,6 +7879,22 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7720 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 7879 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7721 } 7880 }
7722 7881
7882 /* Enable MAC control of LPI */
7883 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7884 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7885 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7886 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7887
7888 tw32_f(TG3_CPMU_EEE_CTRL,
7889 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7890
7891 tw32_f(TG3_CPMU_EEE_MODE,
7892 TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7893 TG3_CPMU_EEEMD_LPI_IN_TX |
7894 TG3_CPMU_EEEMD_LPI_IN_RX |
7895 TG3_CPMU_EEEMD_EEE_ENABLE);
7896 }
7897
7723 /* This works around an issue with Athlon chipsets on 7898 /* This works around an issue with Athlon chipsets on
7724 * B3 tigon3 silicon. This bit has no effect on any 7899 * B3 tigon3 silicon. This bit has no effect on any
7725 * other revision. But do not set this on PCI Express 7900 * other revision. But do not set this on PCI Express
@@ -7845,7 +8020,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7845 tw32(BUFMGR_DMA_HIGH_WATER, 8020 tw32(BUFMGR_DMA_HIGH_WATER,
7846 tp->bufmgr_config.dma_high_water); 8021 tp->bufmgr_config.dma_high_water);
7847 8022
7848 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE); 8023 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8025 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8026 tw32(BUFMGR_MODE, val);
7849 for (i = 0; i < 2000; i++) { 8027 for (i = 0; i < 2000; i++) {
7850 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE) 8028 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7851 break; 8029 break;
@@ -7928,10 +8106,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7928 BDINFO_FLAGS_DISABLED); 8106 BDINFO_FLAGS_DISABLED);
7929 } 8107 }
7930 8108
7931 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) 8109 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7932 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | 8110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7933 (TG3_RX_STD_DMA_SZ << 2); 8111 val = RX_STD_MAX_SIZE_5705;
7934 else 8112 else
8113 val = RX_STD_MAX_SIZE_5717;
8114 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8115 val |= (TG3_RX_STD_DMA_SZ << 2);
8116 } else
7935 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT; 8117 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7936 } else 8118 } else
7937 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; 8119 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
@@ -8015,6 +8197,23 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 8197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8016 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 8198 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8017 8199
8200 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8202 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8204 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8205 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8206 tw32(TG3_RDMA_RSRVCTRL_REG,
8207 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8208 }
8209
8210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8211 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8212 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8213 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8214 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8215 }
8216
8018 /* Receive/send statistics. */ 8217 /* Receive/send statistics. */
8019 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 8218 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8020 val = tr32(RCVLPC_STATS_ENABLE); 8219 val = tr32(RCVLPC_STATS_ENABLE);
@@ -8197,7 +8396,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8197 8396
8198 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE); 8397 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8199 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB); 8398 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8200 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ); 8399 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8402 val |= RCVDBDI_MODE_LRG_RING_SZ;
8403 tw32(RCVDBDI_MODE, val);
8201 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE); 8404 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8202 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 8405 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8203 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8); 8406 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
@@ -8500,6 +8703,12 @@ static void tg3_timer(unsigned long __opaque)
8500 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) 8703 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8501 tg3_periodic_fetch_stats(tp); 8704 tg3_periodic_fetch_stats(tp);
8502 8705
8706 if (tp->setlpicnt && !--tp->setlpicnt) {
8707 u32 val = tr32(TG3_CPMU_EEE_MODE);
8708 tw32(TG3_CPMU_EEE_MODE,
8709 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8710 }
8711
8503 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) { 8712 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8504 u32 mac_stat; 8713 u32 mac_stat;
8505 int phy_event; 8714 int phy_event;
@@ -8816,16 +9025,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
8816 for (i = 0; i < tp->irq_max; i++) 9025 for (i = 0; i < tp->irq_max; i++)
8817 tp->napi[i].irq_vec = msix_ent[i].vector; 9026 tp->napi[i].irq_vec = msix_ent[i].vector;
8818 9027
8819 tp->dev->real_num_tx_queues = 1; 9028 netif_set_real_num_tx_queues(tp->dev, 1);
8820 if (tp->irq_cnt > 1) { 9029 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
8821 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9030 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
8822 9031 pci_disable_msix(tp->pdev);
8823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 9032 return false;
8824 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8825 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8826 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8827 }
8828 } 9033 }
9034 if (tp->irq_cnt > 1)
9035 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8829 9036
8830 return true; 9037 return true;
8831} 9038}
@@ -8858,7 +9065,8 @@ defcfg:
8858 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) { 9065 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8859 tp->irq_cnt = 1; 9066 tp->irq_cnt = 1;
8860 tp->napi[0].irq_vec = tp->pdev->irq; 9067 tp->napi[0].irq_vec = tp->pdev->irq;
8861 tp->dev->real_num_tx_queues = 1; 9068 netif_set_real_num_tx_queues(tp->dev, 1);
9069 netif_set_real_num_rx_queues(tp->dev, 1);
8862 } 9070 }
8863} 9071}
8864 9072
@@ -8917,6 +9125,8 @@ static int tg3_open(struct net_device *dev)
8917 if (err) 9125 if (err)
8918 goto err_out1; 9126 goto err_out1;
8919 9127
9128 tg3_napi_init(tp);
9129
8920 tg3_napi_enable(tp); 9130 tg3_napi_enable(tp);
8921 9131
8922 for (i = 0; i < tp->irq_cnt; i++) { 9132 for (i = 0; i < tp->irq_cnt; i++) {
@@ -9004,6 +9214,7 @@ err_out3:
9004 9214
9005err_out2: 9215err_out2:
9006 tg3_napi_disable(tp); 9216 tg3_napi_disable(tp);
9217 tg3_napi_fini(tp);
9007 tg3_free_consistent(tp); 9218 tg3_free_consistent(tp);
9008 9219
9009err_out1: 9220err_out1:
@@ -9051,6 +9262,8 @@ static int tg3_close(struct net_device *dev)
9051 memcpy(&tp->estats_prev, tg3_get_estats(tp), 9262 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9052 sizeof(tp->estats_prev)); 9263 sizeof(tp->estats_prev));
9053 9264
9265 tg3_napi_fini(tp);
9266
9054 tg3_free_consistent(tp); 9267 tg3_free_consistent(tp);
9055 9268
9056 tg3_set_power_state(tp, PCI_D3hot); 9269 tg3_set_power_state(tp, PCI_D3hot);
@@ -9594,6 +9807,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9594 if (netif_running(dev)) { 9807 if (netif_running(dev)) {
9595 cmd->speed = tp->link_config.active_speed; 9808 cmd->speed = tp->link_config.active_speed;
9596 cmd->duplex = tp->link_config.active_duplex; 9809 cmd->duplex = tp->link_config.active_duplex;
9810 } else {
9811 cmd->speed = SPEED_INVALID;
9812 cmd->duplex = DUPLEX_INVALID;
9597 } 9813 }
9598 cmd->phy_address = tp->phy_addr; 9814 cmd->phy_address = tp->phy_addr;
9599 cmd->transceiver = XCVR_INTERNAL; 9815 cmd->transceiver = XCVR_INTERNAL;
@@ -9820,10 +10036,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
9820{ 10036{
9821 struct tg3 *tp = netdev_priv(dev); 10037 struct tg3 *tp = netdev_priv(dev);
9822 10038
9823 ering->rx_max_pending = TG3_RX_RING_SIZE - 1; 10039 ering->rx_max_pending = tp->rx_std_ring_mask;
9824 ering->rx_mini_max_pending = 0; 10040 ering->rx_mini_max_pending = 0;
9825 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) 10041 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9826 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; 10042 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
9827 else 10043 else
9828 ering->rx_jumbo_max_pending = 0; 10044 ering->rx_jumbo_max_pending = 0;
9829 10045
@@ -9844,8 +10060,8 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
9844 struct tg3 *tp = netdev_priv(dev); 10060 struct tg3 *tp = netdev_priv(dev);
9845 int i, irq_sync = 0, err = 0; 10061 int i, irq_sync = 0, err = 0;
9846 10062
9847 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) || 10063 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
9848 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) || 10064 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
9849 (ering->tx_pending > TG3_TX_RING_SIZE - 1) || 10065 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9850 (ering->tx_pending <= MAX_SKB_FRAGS) || 10066 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9851 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) && 10067 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
@@ -9867,7 +10083,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
9867 tp->rx_pending = 63; 10083 tp->rx_pending = 63;
9868 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 10084 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9869 10085
9870 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) 10086 for (i = 0; i < tp->irq_max; i++)
9871 tp->napi[i].tx_pending = ering->tx_pending; 10087 tp->napi[i].tx_pending = ering->tx_pending;
9872 10088
9873 if (netif_running(dev)) { 10089 if (netif_running(dev)) {
@@ -9915,8 +10131,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9915 10131
9916 if (!(phydev->supported & SUPPORTED_Pause) || 10132 if (!(phydev->supported & SUPPORTED_Pause) ||
9917 (!(phydev->supported & SUPPORTED_Asym_Pause) && 10133 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9918 ((epause->rx_pause && !epause->tx_pause) || 10134 (epause->rx_pause != epause->tx_pause)))
9919 (!epause->rx_pause && epause->tx_pause))))
9920 return -EINVAL; 10135 return -EINVAL;
9921 10136
9922 tp->link_config.flowctrl = 0; 10137 tp->link_config.flowctrl = 0;
@@ -10608,12 +10823,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10608 int num_pkts, tx_len, rx_len, i, err; 10823 int num_pkts, tx_len, rx_len, i, err;
10609 struct tg3_rx_buffer_desc *desc; 10824 struct tg3_rx_buffer_desc *desc;
10610 struct tg3_napi *tnapi, *rnapi; 10825 struct tg3_napi *tnapi, *rnapi;
10611 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 10826 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10612 10827
10613 tnapi = &tp->napi[0]; 10828 tnapi = &tp->napi[0];
10614 rnapi = &tp->napi[0]; 10829 rnapi = &tp->napi[0];
10615 if (tp->irq_cnt > 1) { 10830 if (tp->irq_cnt > 1) {
10616 rnapi = &tp->napi[1]; 10831 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
10832 rnapi = &tp->napi[1];
10617 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 10833 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10618 tnapi = &tp->napi[1]; 10834 tnapi = &tp->napi[1];
10619 } 10835 }
@@ -12330,6 +12546,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12330 } 12546 }
12331 } 12547 }
12332 12548
12549 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12550 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12551 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))
12552 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12553
12333 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 12554 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12334 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && 12555 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12335 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { 12556 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
@@ -12401,14 +12622,18 @@ skip_phy_reset:
12401 12622
12402static void __devinit tg3_read_vpd(struct tg3 *tp) 12623static void __devinit tg3_read_vpd(struct tg3 *tp)
12403{ 12624{
12404 u8 vpd_data[TG3_NVM_VPD_LEN]; 12625 u8 *vpd_data;
12405 unsigned int block_end, rosize, len; 12626 unsigned int block_end, rosize, len;
12406 int j, i = 0; 12627 int j, i = 0;
12407 u32 magic; 12628 u32 magic;
12408 12629
12409 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 12630 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12410 tg3_nvram_read(tp, 0x0, &magic)) 12631 tg3_nvram_read(tp, 0x0, &magic))
12411 goto out_not_found; 12632 goto out_no_vpd;
12633
12634 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12635 if (!vpd_data)
12636 goto out_no_vpd;
12412 12637
12413 if (magic == TG3_EEPROM_MAGIC) { 12638 if (magic == TG3_EEPROM_MAGIC) {
12414 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { 12639 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
@@ -12492,43 +12717,51 @@ partno:
12492 12717
12493 memcpy(tp->board_part_number, &vpd_data[i], len); 12718 memcpy(tp->board_part_number, &vpd_data[i], len);
12494 12719
12495 return;
12496
12497out_not_found: 12720out_not_found:
12498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12721 kfree(vpd_data);
12722 if (tp->board_part_number[0])
12723 return;
12724
12725out_no_vpd:
12726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12727 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
12728 strcpy(tp->board_part_number, "BCM5717");
12729 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
12730 strcpy(tp->board_part_number, "BCM5718");
12731 else
12732 goto nomatch;
12733 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
12734 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12735 strcpy(tp->board_part_number, "BCM57780");
12736 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12737 strcpy(tp->board_part_number, "BCM57760");
12738 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12739 strcpy(tp->board_part_number, "BCM57790");
12740 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12741 strcpy(tp->board_part_number, "BCM57788");
12742 else
12743 goto nomatch;
12744 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
12745 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12746 strcpy(tp->board_part_number, "BCM57761");
12747 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12748 strcpy(tp->board_part_number, "BCM57765");
12749 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12750 strcpy(tp->board_part_number, "BCM57781");
12751 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12752 strcpy(tp->board_part_number, "BCM57785");
12753 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12754 strcpy(tp->board_part_number, "BCM57791");
12755 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12756 strcpy(tp->board_part_number, "BCM57795");
12757 else
12758 goto nomatch;
12759 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12499 strcpy(tp->board_part_number, "BCM95906"); 12760 strcpy(tp->board_part_number, "BCM95906");
12500 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 && 12761 } else {
12501 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) 12762nomatch:
12502 strcpy(tp->board_part_number, "BCM57780");
12503 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12504 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12505 strcpy(tp->board_part_number, "BCM57760");
12506 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12507 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12508 strcpy(tp->board_part_number, "BCM57790");
12509 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12510 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12511 strcpy(tp->board_part_number, "BCM57788");
12512 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12513 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12514 strcpy(tp->board_part_number, "BCM57761");
12515 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12516 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12517 strcpy(tp->board_part_number, "BCM57765");
12518 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12519 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12520 strcpy(tp->board_part_number, "BCM57781");
12521 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12522 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12523 strcpy(tp->board_part_number, "BCM57785");
12524 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12525 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12526 strcpy(tp->board_part_number, "BCM57791");
12527 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12528 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12529 strcpy(tp->board_part_number, "BCM57795");
12530 else
12531 strcpy(tp->board_part_number, "none"); 12763 strcpy(tp->board_part_number, "none");
12764 }
12532} 12765}
12533 12766
12534static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 12767static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
@@ -12637,6 +12870,9 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12637 case TG3_EEPROM_SB_REVISION_5: 12870 case TG3_EEPROM_SB_REVISION_5:
12638 offset = TG3_EEPROM_SB_F1R5_EDH_OFF; 12871 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12639 break; 12872 break;
12873 case TG3_EEPROM_SB_REVISION_6:
12874 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
12875 break;
12640 default: 12876 default:
12641 return; 12877 return;
12642 } 12878 }
@@ -12736,10 +12972,12 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12736 12972
12737 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION); 12973 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12738 12974
12739 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) 12975 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
12976 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
12740 fwtype = "NCSI"; 12977 fwtype = "NCSI";
12741 else 12978 } else {
12742 fwtype = "DASH"; 12979 fwtype = "DASH";
12980 }
12743 12981
12744 vlen = strlen(tp->fw_ver); 12982 vlen = strlen(tp->fw_ver);
12745 12983
@@ -12795,6 +13033,18 @@ static void inline vlan_features_add(struct net_device *dev, unsigned long flags
12795#endif 13033#endif
12796} 13034}
12797 13035
13036static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13037{
13038 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13040 return 4096;
13041 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
13042 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13043 return 1024;
13044 else
13045 return 512;
13046}
13047
12798static int __devinit tg3_get_invariants(struct tg3 *tp) 13048static int __devinit tg3_get_invariants(struct tg3 *tp)
12799{ 13049{
12800 static struct pci_device_id write_reorder_chipsets[] = { 13050 static struct pci_device_id write_reorder_chipsets[] = {
@@ -12839,7 +13089,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12839 13089
12840 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 13090 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12841 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 13091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12842 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12843 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719) 13092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12844 pci_read_config_dword(tp->pdev, 13093 pci_read_config_dword(tp->pdev,
12845 TG3PCI_GEN2_PRODID_ASICREV, 13094 TG3PCI_GEN2_PRODID_ASICREV,
@@ -13410,10 +13659,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13410 if (err) 13659 if (err)
13411 return err; 13660 return err;
13412 13661
13413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13414 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13415 return -ENOTSUPP;
13416
13417 /* Initialize data/descriptor byte/word swapping. */ 13662 /* Initialize data/descriptor byte/word swapping. */
13418 val = tr32(GRC_MODE); 13663 val = tr32(GRC_MODE);
13419 val &= GRC_MODE_HOST_STACKUP; 13664 val &= GRC_MODE_HOST_STACKUP;
@@ -13553,7 +13798,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13553#endif 13798#endif
13554 } 13799 }
13555 13800
13556 tp->rx_std_max_post = TG3_RX_RING_SIZE; 13801 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
13802 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
13803 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
13804
13805 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
13557 13806
13558 /* Increment the rx prod index on the rx std ring by at most 13807 /* Increment the rx prod index on the rx std ring by at most
13559 * 8 for these chips to workaround hw errata. 13808 * 8 for these chips to workaround hw errata.
@@ -14442,7 +14691,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14442 } 14691 }
14443 14692
14444 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) && 14693 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14445 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 && 14694 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
14446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) 14695 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14447 dev->netdev_ops = &tg3_netdev_ops; 14696 dev->netdev_ops = &tg3_netdev_ops;
14448 else 14697 else
@@ -14581,7 +14830,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14581 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; 14830 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14582 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; 14831 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14583 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; 14832 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14584 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) { 14833 for (i = 0; i < tp->irq_max; i++) {
14585 struct tg3_napi *tnapi = &tp->napi[i]; 14834 struct tg3_napi *tnapi = &tp->napi[i];
14586 14835
14587 tnapi->tp = tp; 14836 tnapi->tp = tp;
@@ -14596,13 +14845,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14596 tnapi->consmbox = rcvmbx; 14845 tnapi->consmbox = rcvmbx;
14597 tnapi->prodmbox = sndmbx; 14846 tnapi->prodmbox = sndmbx;
14598 14847
14599 if (i) { 14848 if (i)
14600 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1); 14849 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14601 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64); 14850 else
14602 } else {
14603 tnapi->coal_now = HOSTCC_MODE_NOW; 14851 tnapi->coal_now = HOSTCC_MODE_NOW;
14604 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14605 }
14606 14852
14607 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX)) 14853 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14608 break; 14854 break;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4937bd190964..8342190df0ff 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -26,6 +26,7 @@
26#define TG3_RX_INTERNAL_RING_SZ_5906 32 26#define TG3_RX_INTERNAL_RING_SZ_5906 32
27 27
28#define RX_STD_MAX_SIZE_5705 512 28#define RX_STD_MAX_SIZE_5705 512
29#define RX_STD_MAX_SIZE_5717 2048
29#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ 30#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
30 31
31/* First 256 bytes are a mirror of PCI config space. */ 32/* First 256 bytes are a mirror of PCI config space. */
@@ -46,7 +47,6 @@
46#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 47#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
47#define TG3PCI_DEVICE_TIGON3_5717 0x1655 48#define TG3PCI_DEVICE_TIGON3_5717 0x1655
48#define TG3PCI_DEVICE_TIGON3_5718 0x1656 49#define TG3PCI_DEVICE_TIGON3_5718 0x1656
49#define TG3PCI_DEVICE_TIGON3_5724 0x165c
50#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 50#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
51#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 51#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
52#define TG3PCI_DEVICE_TIGON3_57761 0x16b0 52#define TG3PCI_DEVICE_TIGON3_57761 0x16b0
@@ -973,6 +973,7 @@
973#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004 973#define RCVDBDI_MODE_JUMBOBD_NEEDED 0x00000004
974#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008 974#define RCVDBDI_MODE_FRM_TOO_BIG 0x00000008
975#define RCVDBDI_MODE_INV_RING_SZ 0x00000010 975#define RCVDBDI_MODE_INV_RING_SZ 0x00000010
976#define RCVDBDI_MODE_LRG_RING_SZ 0x00010000
976#define RCVDBDI_STATUS 0x00002404 977#define RCVDBDI_STATUS 0x00002404
977#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004 978#define RCVDBDI_STATUS_JUMBOBD_NEEDED 0x00000004
978#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008 979#define RCVDBDI_STATUS_FRM_TOO_BIG 0x00000008
@@ -1090,7 +1091,26 @@
1090#define CPMU_MUTEX_GNT_DRIVER 0x00001000 1091#define CPMU_MUTEX_GNT_DRIVER 0x00001000
1091#define TG3_CPMU_PHY_STRAP 0x00003664 1092#define TG3_CPMU_PHY_STRAP 0x00003664
1092#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020 1093#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
1093/* 0x3664 --> 0x3800 unused */ 1094/* 0x3664 --> 0x36b0 unused */
1095
1096#define TG3_CPMU_EEE_MODE 0x000036b0
1097#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
1098#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
1099#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
1100#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
1101#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
1102/* 0x36b4 --> 0x36b8 unused */
1103
1104#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1105#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
1106#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
1107/* 0x36c0 --> 0x36d0 unused */
1108
1109#define TG3_CPMU_EEE_CTRL 0x000036d0
1110#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US 0x0000019d
1111#define TG3_CPMU_EEE_CTRL_EXIT_36_US 0x00000384
1112#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US 0x000001f8
1113/* 0x36d4 --> 0x3800 unused */
1094 1114
1095/* Mbuf cluster free registers */ 1115/* Mbuf cluster free registers */
1096#define MBFREE_MODE 0x00003800 1116#define MBFREE_MODE 0x00003800
@@ -1225,6 +1245,7 @@
1225#define BUFMGR_MODE_ATTN_ENABLE 0x00000004 1245#define BUFMGR_MODE_ATTN_ENABLE 0x00000004
1226#define BUFMGR_MODE_BM_TEST 0x00000008 1246#define BUFMGR_MODE_BM_TEST 0x00000008
1227#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010 1247#define BUFMGR_MODE_MBLOW_ATTN_ENAB 0x00000010
1248#define BUFMGR_MODE_NO_TX_UNDERRUN 0x80000000
1228#define BUFMGR_STATUS 0x00004404 1249#define BUFMGR_STATUS 0x00004404
1229#define BUFMGR_STATUS_ERROR 0x00000004 1250#define BUFMGR_STATUS_ERROR 0x00000004
1230#define BUFMGR_STATUS_MBLOW 0x00000010 1251#define BUFMGR_STATUS_MBLOW 0x00000010
@@ -1302,7 +1323,16 @@
1302#define RDMAC_STATUS_FIFOURUN 0x00000080 1323#define RDMAC_STATUS_FIFOURUN 0x00000080
1303#define RDMAC_STATUS_FIFOOREAD 0x00000100 1324#define RDMAC_STATUS_FIFOOREAD 0x00000100
1304#define RDMAC_STATUS_LNGREAD 0x00000200 1325#define RDMAC_STATUS_LNGREAD 0x00000200
1305/* 0x4808 --> 0x4c00 unused */ 1326/* 0x4808 --> 0x4900 unused */
1327
1328#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1329#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1330/* 0x4904 --> 0x4910 unused */
1331
1332#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
1333#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
1334#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
1335/* 0x4914 --> 0x4c00 unused */
1306 1336
1307/* Write DMA control registers */ 1337/* Write DMA control registers */
1308#define WDMAC_MODE 0x00004c00 1338#define WDMAC_MODE 0x00004c00
@@ -1904,6 +1934,7 @@
1904#define TG3_EEPROM_SB_REVISION_3 0x00030000 1934#define TG3_EEPROM_SB_REVISION_3 0x00030000
1905#define TG3_EEPROM_SB_REVISION_4 0x00040000 1935#define TG3_EEPROM_SB_REVISION_4 0x00040000
1906#define TG3_EEPROM_SB_REVISION_5 0x00050000 1936#define TG3_EEPROM_SB_REVISION_5 0x00050000
1937#define TG3_EEPROM_SB_REVISION_6 0x00060000
1907#define TG3_EEPROM_MAGIC_HW 0xabcd 1938#define TG3_EEPROM_MAGIC_HW 0xabcd
1908#define TG3_EEPROM_MAGIC_HW_MSK 0xffff 1939#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
1909 1940
@@ -1923,6 +1954,7 @@
1923#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18 1954#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
1924#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c 1955#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
1925#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20 1956#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
1957#define TG3_EEPROM_SB_F1R6_EDH_OFF 0x4c
1926#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700 1958#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
1927#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8 1959#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
1928#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff 1960#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
@@ -2048,6 +2080,10 @@
2048#define MII_TG3_CTRL_AS_MASTER 0x0800 2080#define MII_TG3_CTRL_AS_MASTER 0x0800
2049#define MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000 2081#define MII_TG3_CTRL_ENABLE_AS_MASTER 0x1000
2050 2082
2083#define MII_TG3_MMD_CTRL 0x0d /* MMD Access Control register */
2084#define MII_TG3_MMD_CTRL_DATA_NOINC 0x4000
2085#define MII_TG3_MMD_ADDRESS 0x0e /* MMD Address Data register */
2086
2051#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */ 2087#define MII_TG3_EXT_CTRL 0x10 /* Extended control register */
2052#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001 2088#define MII_TG3_EXT_CTRL_FIFO_ELASTIC 0x0001
2053#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002 2089#define MII_TG3_EXT_CTRL_LNK3_LED_MODE 0x0002
@@ -2065,6 +2101,8 @@
2065#define MII_TG3_DSP_TAP1 0x0001 2101#define MII_TG3_DSP_TAP1 0x0001
2066#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 2102#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
2067#define MII_TG3_DSP_AADJ1CH0 0x001f 2103#define MII_TG3_DSP_AADJ1CH0 0x001f
2104#define MII_TG3_DSP_CH34TP2 0x4022
2105#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
2068#define MII_TG3_DSP_AADJ1CH3 0x601f 2106#define MII_TG3_DSP_AADJ1CH3 0x601f
2069#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 2107#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
2070#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01 2108#define MII_TG3_DSP_EXP1_INT_STAT 0x0f01
@@ -2131,6 +2169,14 @@
2131#define MII_TG3_TEST1_TRIM_EN 0x0010 2169#define MII_TG3_TEST1_TRIM_EN 0x0010
2132#define MII_TG3_TEST1_CRC_EN 0x8000 2170#define MII_TG3_TEST1_CRC_EN 0x8000
2133 2171
2172/* Clause 45 expansion registers */
2173#define TG3_CL45_D7_EEEADV_CAP 0x003c
2174#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
2175#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
2176#define TG3_CL45_D7_EEERES_STAT 0x803e
2177#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
2178#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
2179
2134 2180
2135/* Fast Ethernet Tranceiver definitions */ 2181/* Fast Ethernet Tranceiver definitions */
2136#define MII_TG3_FET_PTEST 0x17 2182#define MII_TG3_FET_PTEST 0x17
@@ -2176,7 +2222,7 @@
2176#define TG3_APE_HOST_SEG_SIG 0x4200 2222#define TG3_APE_HOST_SEG_SIG 0x4200
2177#define APE_HOST_SEG_SIG_MAGIC 0x484f5354 2223#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
2178#define TG3_APE_HOST_SEG_LEN 0x4204 2224#define TG3_APE_HOST_SEG_LEN 0x4204
2179#define APE_HOST_SEG_LEN_MAGIC 0x0000001c 2225#define APE_HOST_SEG_LEN_MAGIC 0x00000020
2180#define TG3_APE_HOST_INIT_COUNT 0x4208 2226#define TG3_APE_HOST_INIT_COUNT 0x4208
2181#define TG3_APE_HOST_DRIVER_ID 0x420c 2227#define TG3_APE_HOST_DRIVER_ID 0x420c
2182#define APE_HOST_DRIVER_ID_LINUX 0xf0000000 2228#define APE_HOST_DRIVER_ID_LINUX 0xf0000000
@@ -2188,6 +2234,12 @@
2188#define APE_HOST_HEARTBEAT_INT_DISABLE 0 2234#define APE_HOST_HEARTBEAT_INT_DISABLE 0
2189#define APE_HOST_HEARTBEAT_INT_5SEC 5000 2235#define APE_HOST_HEARTBEAT_INT_5SEC 5000
2190#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218 2236#define TG3_APE_HOST_HEARTBEAT_COUNT 0x4218
2237#define TG3_APE_HOST_DRVR_STATE 0x421c
2238#define TG3_APE_HOST_DRVR_STATE_START 0x00000001
2239#define TG3_APE_HOST_DRVR_STATE_UNLOAD 0x00000002
2240#define TG3_APE_HOST_DRVR_STATE_WOL 0x00000003
2241#define TG3_APE_HOST_WOL_SPEED 0x4224
2242#define TG3_APE_HOST_WOL_SPEED_AUTO 0x00008000
2191 2243
2192#define TG3_APE_EVENT_STATUS 0x4300 2244#define TG3_APE_EVENT_STATUS 0x4300
2193 2245
@@ -2649,7 +2701,8 @@ struct tg3_rx_prodring_set {
2649 dma_addr_t rx_jmb_mapping; 2701 dma_addr_t rx_jmb_mapping;
2650}; 2702};
2651 2703
2652#define TG3_IRQ_MAX_VECS 5 2704#define TG3_IRQ_MAX_VECS_RSS 5
2705#define TG3_IRQ_MAX_VECS TG3_IRQ_MAX_VECS_RSS
2653 2706
2654struct tg3_napi { 2707struct tg3_napi {
2655 struct napi_struct napi ____cacheline_aligned; 2708 struct napi_struct napi ____cacheline_aligned;
@@ -2668,7 +2721,7 @@ struct tg3_napi {
2668 u32 consmbox; 2721 u32 consmbox;
2669 u32 rx_rcb_ptr; 2722 u32 rx_rcb_ptr;
2670 u16 *rx_rcb_prod_idx; 2723 u16 *rx_rcb_prod_idx;
2671 struct tg3_rx_prodring_set *prodring; 2724 struct tg3_rx_prodring_set prodring;
2672 2725
2673 struct tg3_rx_buffer_desc *rx_rcb; 2726 struct tg3_rx_buffer_desc *rx_rcb;
2674 struct tg3_tx_buffer_desc *tx_ring; 2727 struct tg3_tx_buffer_desc *tx_ring;
@@ -2746,6 +2799,9 @@ struct tg3 {
2746 void (*write32_rx_mbox) (struct tg3 *, u32, 2799 void (*write32_rx_mbox) (struct tg3 *, u32,
2747 u32); 2800 u32);
2748 u32 rx_copy_thresh; 2801 u32 rx_copy_thresh;
2802 u32 rx_std_ring_mask;
2803 u32 rx_jmb_ring_mask;
2804 u32 rx_ret_ring_mask;
2749 u32 rx_pending; 2805 u32 rx_pending;
2750 u32 rx_jumbo_pending; 2806 u32 rx_jumbo_pending;
2751 u32 rx_std_max_post; 2807 u32 rx_std_max_post;
@@ -2755,8 +2811,6 @@ struct tg3 {
2755 struct vlan_group *vlgrp; 2811 struct vlan_group *vlgrp;
2756#endif 2812#endif
2757 2813
2758 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
2759
2760 2814
2761 /* begin "everything else" cacheline(s) section */ 2815 /* begin "everything else" cacheline(s) section */
2762 struct rtnl_link_stats64 net_stats; 2816 struct rtnl_link_stats64 net_stats;
@@ -2850,6 +2904,7 @@ struct tg3 {
2850#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000 2904#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
2851#define TG3_FLG3_L1PLLPD_EN 0x00800000 2905#define TG3_FLG3_L1PLLPD_EN 0x00800000
2852#define TG3_FLG3_5717_PLUS 0x01000000 2906#define TG3_FLG3_5717_PLUS 0x01000000
2907#define TG3_FLG3_APE_HAS_NCSI 0x02000000
2853 2908
2854 struct timer_list timer; 2909 struct timer_list timer;
2855 u16 timer_counter; 2910 u16 timer_counter;
@@ -2966,9 +3021,11 @@ struct tg3 {
2966#define TG3_PHYFLG_BER_BUG 0x00008000 3021#define TG3_PHYFLG_BER_BUG 0x00008000
2967#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000 3022#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
2968#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000 3023#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
3024#define TG3_PHYFLG_EEE_CAP 0x00040000
2969 3025
2970 u32 led_ctrl; 3026 u32 led_ctrl;
2971 u32 phy_otp; 3027 u32 phy_otp;
3028 u32 setlpicnt;
2972 3029
2973#define TG3_BPN_SIZE 24 3030#define TG3_BPN_SIZE 24
2974 char board_part_number[TG3_BPN_SIZE]; 3031 char board_part_number[TG3_BPN_SIZE];
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 0564ca05963d..ec8c804a795d 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -3187,7 +3187,7 @@ static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3187 TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
3188 } 3188 }
3189 3189
3190 return ( err ); 3190 return err;
3191 3191
3192} /* TLan_EeSendByte */ 3192} /* TLan_EeSendByte */
3193 3193
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index d13ff12d7500..3315ced774e2 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -442,7 +442,7 @@ typedef struct tlan_private_tag {
442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) 442static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
443{ 443{
444 outw(internal_addr, base_addr + TLAN_DIO_ADR); 444 outw(internal_addr, base_addr + TLAN_DIO_ADR);
445 return (inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3))); 445 return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
446 446
447} /* TLan_DioRead8 */ 447} /* TLan_DioRead8 */
448 448
@@ -452,7 +452,7 @@ static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) 452static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
453{ 453{
454 outw(internal_addr, base_addr + TLAN_DIO_ADR); 454 outw(internal_addr, base_addr + TLAN_DIO_ADR);
455 return (inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2))); 455 return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
456 456
457} /* TLan_DioRead16 */ 457} /* TLan_DioRead16 */
458 458
@@ -462,7 +462,7 @@ static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) 462static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
463{ 463{
464 outw(internal_addr, base_addr + TLAN_DIO_ADR); 464 outw(internal_addr, base_addr + TLAN_DIO_ADR);
465 return (inl(base_addr + TLAN_DIO_DATA)); 465 return inl(base_addr + TLAN_DIO_DATA);
466 466
467} /* TLan_DioRead32 */ 467} /* TLan_DioRead32 */
468 468
@@ -537,6 +537,6 @@ static inline u32 TLan_HashFunc( const u8 *a )
537 hash ^= ((a[2]^a[5])<<4); /* & 060 */ 537 hash ^= ((a[2]^a[5])<<4); /* & 060 */
538 hash ^= ((a[2]^a[5])>>2); /* & 077 */ 538 hash ^= ((a[2]^a[5])>>2); /* & 077 */
539 539
540 return (hash & 077); 540 return hash & 077;
541} 541}
542#endif 542#endif
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 16e8783ee9cd..8d362e64a40e 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -110,7 +110,7 @@ static int __init proteon_probe1(struct net_device *dev, int ioaddr)
110 } 110 }
111 111
112 dev->base_addr = ioaddr; 112 dev->base_addr = ioaddr;
113 return (0); 113 return 0;
114nodev: 114nodev:
115 release_region(ioaddr, PROTEON_IO_EXTENT); 115 release_region(ioaddr, PROTEON_IO_EXTENT);
116 return -ENODEV; 116 return -ENODEV;
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 0929fff5982c..63db5a6762ae 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -435,7 +435,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
435 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]); 435 RX_DATA_BUFFER_SIZE * tp->num_rx_bdbs[NON_MAC_QUEUE]);
436 tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0); 436 tp->rx_buff_end[NON_MAC_QUEUE] = (__u16 *)smctr_malloc(dev, 0);
437 437
438 return (0); 438 return 0;
439} 439}
440 440
441/* Enter Bypass state. */ 441/* Enter Bypass state. */
@@ -448,7 +448,7 @@ static int smctr_bypass_state(struct net_device *dev)
448 448
449 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE); 449 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, JS_BYPASS_STATE);
450 450
451 return (err); 451 return err;
452} 452}
453 453
454static int smctr_checksum_firmware(struct net_device *dev) 454static int smctr_checksum_firmware(struct net_device *dev)
@@ -471,9 +471,9 @@ static int smctr_checksum_firmware(struct net_device *dev)
471 smctr_disable_adapter_ctrl_store(dev); 471 smctr_disable_adapter_ctrl_store(dev);
472 472
473 if(checksum) 473 if(checksum)
474 return (checksum); 474 return checksum;
475 475
476 return (0); 476 return 0;
477} 477}
478 478
479static int __init smctr_chk_mca(struct net_device *dev) 479static int __init smctr_chk_mca(struct net_device *dev)
@@ -485,7 +485,7 @@ static int __init smctr_chk_mca(struct net_device *dev)
485 485
486 current_slot = mca_find_unused_adapter(smctr_posid, 0); 486 current_slot = mca_find_unused_adapter(smctr_posid, 0);
487 if(current_slot == MCA_NOTFOUND) 487 if(current_slot == MCA_NOTFOUND)
488 return (-ENODEV); 488 return -ENODEV;
489 489
490 mca_set_adapter_name(current_slot, smctr_name); 490 mca_set_adapter_name(current_slot, smctr_name);
491 mca_mark_as_used(current_slot); 491 mca_mark_as_used(current_slot);
@@ -622,9 +622,9 @@ static int __init smctr_chk_mca(struct net_device *dev)
622 break; 622 break;
623 } 623 }
624 624
625 return (0); 625 return 0;
626#else 626#else
627 return (-1); 627 return -1;
628#endif /* CONFIG_MCA_LEGACY */ 628#endif /* CONFIG_MCA_LEGACY */
629} 629}
630 630
@@ -677,18 +677,18 @@ static int smctr_chg_rx_mask(struct net_device *dev)
677 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0, 677 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_0,
678 &tp->config_word0))) 678 &tp->config_word0)))
679 { 679 {
680 return (err); 680 return err;
681 } 681 }
682 682
683 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1, 683 if((err = smctr_issue_write_word_cmd(dev, RW_CONFIG_REGISTER_1,
684 &tp->config_word1))) 684 &tp->config_word1)))
685 { 685 {
686 return (err); 686 return err;
687 } 687 }
688 688
689 smctr_disable_16bit(dev); 689 smctr_disable_16bit(dev);
690 690
691 return (0); 691 return 0;
692} 692}
693 693
694static int smctr_clear_int(struct net_device *dev) 694static int smctr_clear_int(struct net_device *dev)
@@ -697,7 +697,7 @@ static int smctr_clear_int(struct net_device *dev)
697 697
698 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR); 698 outb((tp->trc_mask | CSR_CLRTINT), dev->base_addr + CSR);
699 699
700 return (0); 700 return 0;
701} 701}
702 702
703static int smctr_clear_trc_reset(int ioaddr) 703static int smctr_clear_trc_reset(int ioaddr)
@@ -707,7 +707,7 @@ static int smctr_clear_trc_reset(int ioaddr)
707 r = inb(ioaddr + MSR); 707 r = inb(ioaddr + MSR);
708 outb(~MSR_RST & r, ioaddr + MSR); 708 outb(~MSR_RST & r, ioaddr + MSR);
709 709
710 return (0); 710 return 0;
711} 711}
712 712
713/* 713/*
@@ -725,7 +725,7 @@ static int smctr_close(struct net_device *dev)
725 725
726 /* Check to see if adapter is already in a closed state. */ 726 /* Check to see if adapter is already in a closed state. */
727 if(tp->status != OPEN) 727 if(tp->status != OPEN)
728 return (0); 728 return 0;
729 729
730 smctr_enable_16bit(dev); 730 smctr_enable_16bit(dev);
731 smctr_set_page(dev, (__u8 *)tp->ram_access); 731 smctr_set_page(dev, (__u8 *)tp->ram_access);
@@ -733,7 +733,7 @@ static int smctr_close(struct net_device *dev)
733 if((err = smctr_issue_remove_cmd(dev))) 733 if((err = smctr_issue_remove_cmd(dev)))
734 { 734 {
735 smctr_disable_16bit(dev); 735 smctr_disable_16bit(dev);
736 return (err); 736 return err;
737 } 737 }
738 738
739 for(;;) 739 for(;;)
@@ -746,7 +746,7 @@ static int smctr_close(struct net_device *dev)
746 } 746 }
747 747
748 748
749 return (0); 749 return 0;
750} 750}
751 751
752static int smctr_decode_firmware(struct net_device *dev, 752static int smctr_decode_firmware(struct net_device *dev,
@@ -807,12 +807,12 @@ static int smctr_decode_firmware(struct net_device *dev,
807 if(buff) 807 if(buff)
808 *(mem++) = SWAP_BYTES(buff); 808 *(mem++) = SWAP_BYTES(buff);
809 809
810 return (0); 810 return 0;
811} 811}
812 812
813static int smctr_disable_16bit(struct net_device *dev) 813static int smctr_disable_16bit(struct net_device *dev)
814{ 814{
815 return (0); 815 return 0;
816} 816}
817 817
818/* 818/*
@@ -832,7 +832,7 @@ static int smctr_disable_adapter_ctrl_store(struct net_device *dev)
832 tp->trc_mask |= CSR_WCSS; 832 tp->trc_mask |= CSR_WCSS;
833 outb(tp->trc_mask, ioaddr + CSR); 833 outb(tp->trc_mask, ioaddr + CSR);
834 834
835 return (0); 835 return 0;
836} 836}
837 837
838static int smctr_disable_bic_int(struct net_device *dev) 838static int smctr_disable_bic_int(struct net_device *dev)
@@ -844,7 +844,7 @@ static int smctr_disable_bic_int(struct net_device *dev)
844 | CSR_MSKTINT | CSR_WCSS; 844 | CSR_MSKTINT | CSR_WCSS;
845 outb(tp->trc_mask, ioaddr + CSR); 845 outb(tp->trc_mask, ioaddr + CSR);
846 846
847 return (0); 847 return 0;
848} 848}
849 849
850static int smctr_enable_16bit(struct net_device *dev) 850static int smctr_enable_16bit(struct net_device *dev)
@@ -858,7 +858,7 @@ static int smctr_enable_16bit(struct net_device *dev)
858 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR); 858 outb((r | LAAR_MEM16ENB), dev->base_addr + LAAR);
859 } 859 }
860 860
861 return (0); 861 return 0;
862} 862}
863 863
864/* 864/*
@@ -881,7 +881,7 @@ static int smctr_enable_adapter_ctrl_store(struct net_device *dev)
881 tp->trc_mask &= ~CSR_WCSS; 881 tp->trc_mask &= ~CSR_WCSS;
882 outb(tp->trc_mask, ioaddr + CSR); 882 outb(tp->trc_mask, ioaddr + CSR);
883 883
884 return (0); 884 return 0;
885} 885}
886 886
887static int smctr_enable_adapter_ram(struct net_device *dev) 887static int smctr_enable_adapter_ram(struct net_device *dev)
@@ -895,7 +895,7 @@ static int smctr_enable_adapter_ram(struct net_device *dev)
895 r = inb(ioaddr + MSR); 895 r = inb(ioaddr + MSR);
896 outb(MSR_MEMB | r, ioaddr + MSR); 896 outb(MSR_MEMB | r, ioaddr + MSR);
897 897
898 return (0); 898 return 0;
899} 899}
900 900
901static int smctr_enable_bic_int(struct net_device *dev) 901static int smctr_enable_bic_int(struct net_device *dev)
@@ -921,7 +921,7 @@ static int smctr_enable_bic_int(struct net_device *dev)
921 break; 921 break;
922 } 922 }
923 923
924 return (0); 924 return 0;
925} 925}
926 926
927static int __init smctr_chk_isa(struct net_device *dev) 927static int __init smctr_chk_isa(struct net_device *dev)
@@ -1145,7 +1145,7 @@ static int __init smctr_chk_isa(struct net_device *dev)
1145 */ 1145 */
1146 } 1146 }
1147 1147
1148 return (0); 1148 return 0;
1149 1149
1150out2: 1150out2:
1151 release_region(ioaddr, SMCTR_IO_EXTENT); 1151 release_region(ioaddr, SMCTR_IO_EXTENT);
@@ -1199,7 +1199,7 @@ static int __init smctr_get_boardid(struct net_device *dev, int mca)
1199 * return; 1199 * return;
1200 */ 1200 */
1201 if(IdByte & 0xF8) 1201 if(IdByte & 0xF8)
1202 return (-1); 1202 return -1;
1203 1203
1204 r1 = inb(ioaddr + BID_REG_1); 1204 r1 = inb(ioaddr + BID_REG_1);
1205 r1 &= BID_ICR_MASK; 1205 r1 &= BID_ICR_MASK;
@@ -1250,21 +1250,21 @@ static int __init smctr_get_boardid(struct net_device *dev, int mca)
1250 while(r1 & BID_RECALL_DONE_MASK) 1250 while(r1 & BID_RECALL_DONE_MASK)
1251 r1 = inb(ioaddr + BID_REG_1); 1251 r1 = inb(ioaddr + BID_REG_1);
1252 1252
1253 return (BoardIdMask); 1253 return BoardIdMask;
1254} 1254}
1255 1255
1256static int smctr_get_group_address(struct net_device *dev) 1256static int smctr_get_group_address(struct net_device *dev)
1257{ 1257{
1258 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR); 1258 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_GROUP_ADDR);
1259 1259
1260 return(smctr_wait_cmd(dev)); 1260 return smctr_wait_cmd(dev);
1261} 1261}
1262 1262
1263static int smctr_get_functional_address(struct net_device *dev) 1263static int smctr_get_functional_address(struct net_device *dev)
1264{ 1264{
1265 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR); 1265 smctr_issue_read_word_cmd(dev, RW_FUNCTIONAL_ADDR);
1266 1266
1267 return(smctr_wait_cmd(dev)); 1267 return smctr_wait_cmd(dev);
1268} 1268}
1269 1269
1270/* Calculate number of Non-MAC receive BDB's and data buffers. 1270/* Calculate number of Non-MAC receive BDB's and data buffers.
@@ -1346,14 +1346,14 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
1346 */ 1346 */
1347 mem_used += 0x100; 1347 mem_used += 0x100;
1348 1348
1349 return((0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock))); 1349 return (0xffff - mem_used) / (RX_DATA_BUFFER_SIZE + sizeof(BDBlock));
1350} 1350}
1351 1351
1352static int smctr_get_physical_drop_number(struct net_device *dev) 1352static int smctr_get_physical_drop_number(struct net_device *dev)
1353{ 1353{
1354 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER); 1354 smctr_issue_read_word_cmd(dev, RW_PHYSICAL_DROP_NUMBER);
1355 1355
1356 return(smctr_wait_cmd(dev)); 1356 return smctr_wait_cmd(dev);
1357} 1357}
1358 1358
1359static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue) 1359static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
@@ -1366,14 +1366,14 @@ static __u8 * smctr_get_rx_pointer(struct net_device *dev, short queue)
1366 1366
1367 tp->rx_fcb_curr[queue]->bdb_ptr = bdb; 1367 tp->rx_fcb_curr[queue]->bdb_ptr = bdb;
1368 1368
1369 return ((__u8 *)bdb->data_block_ptr); 1369 return (__u8 *)bdb->data_block_ptr;
1370} 1370}
1371 1371
1372static int smctr_get_station_id(struct net_device *dev) 1372static int smctr_get_station_id(struct net_device *dev)
1373{ 1373{
1374 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS); 1374 smctr_issue_read_word_cmd(dev, RW_INDIVIDUAL_MAC_ADDRESS);
1375 1375
1376 return(smctr_wait_cmd(dev)); 1376 return smctr_wait_cmd(dev);
1377} 1377}
1378 1378
1379/* 1379/*
@@ -1384,7 +1384,7 @@ static struct net_device_stats *smctr_get_stats(struct net_device *dev)
1384{ 1384{
1385 struct net_local *tp = netdev_priv(dev); 1385 struct net_local *tp = netdev_priv(dev);
1386 1386
1387 return ((struct net_device_stats *)&tp->MacStat); 1387 return (struct net_device_stats *)&tp->MacStat;
1388} 1388}
1389 1389
1390static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue, 1390static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
@@ -1401,14 +1401,14 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1401 1401
1402 /* check if there is enough FCB blocks */ 1402 /* check if there is enough FCB blocks */
1403 if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue]) 1403 if(tp->num_tx_fcbs_used[queue] >= tp->num_tx_fcbs[queue])
1404 return ((FCBlock *)(-1L)); 1404 return (FCBlock *)(-1L);
1405 1405
1406 /* round off the input pkt size to the nearest even number */ 1406 /* round off the input pkt size to the nearest even number */
1407 alloc_size = (bytes_count + 1) & 0xfffe; 1407 alloc_size = (bytes_count + 1) & 0xfffe;
1408 1408
1409 /* check if enough mem */ 1409 /* check if enough mem */
1410 if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue]) 1410 if((tp->tx_buff_used[queue] + alloc_size) > tp->tx_buff_size[queue])
1411 return ((FCBlock *)(-1L)); 1411 return (FCBlock *)(-1L);
1412 1412
1413 /* check if past the end ; 1413 /* check if past the end ;
1414 * if exactly enough mem to end of ring, alloc from front. 1414 * if exactly enough mem to end of ring, alloc from front.
@@ -1425,7 +1425,7 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1425 if((tp->tx_buff_used[queue] + alloc_size) 1425 if((tp->tx_buff_used[queue] + alloc_size)
1426 > tp->tx_buff_size[queue]) 1426 > tp->tx_buff_size[queue])
1427 { 1427 {
1428 return ((FCBlock *)(-1L)); 1428 return (FCBlock *)(-1L);
1429 } 1429 }
1430 1430
1431 /* ring wrap */ 1431 /* ring wrap */
@@ -1448,14 +1448,14 @@ static FCBlock *smctr_get_tx_fcb(struct net_device *dev, __u16 queue,
1448 pFCB = tp->tx_fcb_curr[queue]; 1448 pFCB = tp->tx_fcb_curr[queue];
1449 tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr; 1449 tp->tx_fcb_curr[queue] = tp->tx_fcb_curr[queue]->next_ptr;
1450 1450
1451 return (pFCB); 1451 return pFCB;
1452} 1452}
1453 1453
1454static int smctr_get_upstream_neighbor_addr(struct net_device *dev) 1454static int smctr_get_upstream_neighbor_addr(struct net_device *dev)
1455{ 1455{
1456 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS); 1456 smctr_issue_read_word_cmd(dev, RW_UPSTREAM_NEIGHBOR_ADDRESS);
1457 1457
1458 return(smctr_wait_cmd(dev)); 1458 return smctr_wait_cmd(dev);
1459} 1459}
1460 1460
1461static int smctr_hardware_send_packet(struct net_device *dev, 1461static int smctr_hardware_send_packet(struct net_device *dev,
@@ -1469,21 +1469,22 @@ static int smctr_hardware_send_packet(struct net_device *dev,
1469 printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name); 1469 printk(KERN_DEBUG"%s: smctr_hardware_send_packet\n", dev->name);
1470 1470
1471 if(tp->status != OPEN) 1471 if(tp->status != OPEN)
1472 return (-1); 1472 return -1;
1473 1473
1474 if(tp->monitor_state_ready != 1) 1474 if(tp->monitor_state_ready != 1)
1475 return (-1); 1475 return -1;
1476 1476
1477 for(;;) 1477 for(;;)
1478 { 1478 {
1479 /* Send first buffer from queue */ 1479 /* Send first buffer from queue */
1480 skb = skb_dequeue(&tp->SendSkbQueue); 1480 skb = skb_dequeue(&tp->SendSkbQueue);
1481 if(skb == NULL) 1481 if(skb == NULL)
1482 return (-1); 1482 return -1;
1483 1483
1484 tp->QueueSkb++; 1484 tp->QueueSkb++;
1485 1485
1486 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size) return (-1); 1486 if(skb->len < SMC_HEADER_SIZE || skb->len > tp->max_packet_size)
1487 return -1;
1487 1488
1488 smctr_enable_16bit(dev); 1489 smctr_enable_16bit(dev);
1489 smctr_set_page(dev, (__u8 *)tp->ram_access); 1490 smctr_set_page(dev, (__u8 *)tp->ram_access);
@@ -1492,7 +1493,7 @@ static int smctr_hardware_send_packet(struct net_device *dev,
1492 == (FCBlock *)(-1L)) 1493 == (FCBlock *)(-1L))
1493 { 1494 {
1494 smctr_disable_16bit(dev); 1495 smctr_disable_16bit(dev);
1495 return (-1); 1496 return -1;
1496 } 1497 }
1497 1498
1498 smctr_tx_move_frame(dev, skb, 1499 smctr_tx_move_frame(dev, skb,
@@ -1508,7 +1509,7 @@ static int smctr_hardware_send_packet(struct net_device *dev,
1508 smctr_disable_16bit(dev); 1509 smctr_disable_16bit(dev);
1509 } 1510 }
1510 1511
1511 return (0); 1512 return 0;
1512} 1513}
1513 1514
1514static int smctr_init_acbs(struct net_device *dev) 1515static int smctr_init_acbs(struct net_device *dev)
@@ -1552,7 +1553,7 @@ static int smctr_init_acbs(struct net_device *dev)
1552 tp->acb_curr = tp->acb_head->next_ptr; 1553 tp->acb_curr = tp->acb_head->next_ptr;
1553 tp->num_acbs_used = 0; 1554 tp->num_acbs_used = 0;
1554 1555
1555 return (0); 1556 return 0;
1556} 1557}
1557 1558
1558static int smctr_init_adapter(struct net_device *dev) 1559static int smctr_init_adapter(struct net_device *dev)
@@ -1590,13 +1591,14 @@ static int smctr_init_adapter(struct net_device *dev)
1590 1591
1591 if(smctr_checksum_firmware(dev)) 1592 if(smctr_checksum_firmware(dev))
1592 { 1593 {
1593 printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name); return (-ENOENT); 1594 printk(KERN_ERR "%s: Previously loaded firmware is missing\n",dev->name);
1595 return -ENOENT;
1594 } 1596 }
1595 1597
1596 if((err = smctr_ram_memory_test(dev))) 1598 if((err = smctr_ram_memory_test(dev)))
1597 { 1599 {
1598 printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name); 1600 printk(KERN_ERR "%s: RAM memory test failed.\n", dev->name);
1599 return (-EIO); 1601 return -EIO;
1600 } 1602 }
1601 1603
1602 smctr_set_rx_look_ahead(dev); 1604 smctr_set_rx_look_ahead(dev);
@@ -1608,7 +1610,7 @@ static int smctr_init_adapter(struct net_device *dev)
1608 { 1610 {
1609 printk(KERN_ERR "%s: Initialization of card failed (%d)\n", 1611 printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
1610 dev->name, err); 1612 dev->name, err);
1611 return (-EINVAL); 1613 return -EINVAL;
1612 } 1614 }
1613 1615
1614 /* This routine clobbers the TRC's internal registers. */ 1616 /* This routine clobbers the TRC's internal registers. */
@@ -1616,7 +1618,7 @@ static int smctr_init_adapter(struct net_device *dev)
1616 { 1618 {
1617 printk(KERN_ERR "%s: Card failed internal self test (%d)\n", 1619 printk(KERN_ERR "%s: Card failed internal self test (%d)\n",
1618 dev->name, err); 1620 dev->name, err);
1619 return (-EINVAL); 1621 return -EINVAL;
1620 } 1622 }
1621 1623
1622 /* Re-Initialize adapter's internal registers */ 1624 /* Re-Initialize adapter's internal registers */
@@ -1625,17 +1627,17 @@ static int smctr_init_adapter(struct net_device *dev)
1625 { 1627 {
1626 printk(KERN_ERR "%s: Initialization of card failed (%d)\n", 1628 printk(KERN_ERR "%s: Initialization of card failed (%d)\n",
1627 dev->name, err); 1629 dev->name, err);
1628 return (-EINVAL); 1630 return -EINVAL;
1629 } 1631 }
1630 1632
1631 smctr_enable_bic_int(dev); 1633 smctr_enable_bic_int(dev);
1632 1634
1633 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) 1635 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
1634 return (err); 1636 return err;
1635 1637
1636 smctr_disable_16bit(dev); 1638 smctr_disable_16bit(dev);
1637 1639
1638 return (0); 1640 return 0;
1639} 1641}
1640 1642
1641static int smctr_init_card_real(struct net_device *dev) 1643static int smctr_init_card_real(struct net_device *dev)
@@ -1703,15 +1705,15 @@ static int smctr_init_card_real(struct net_device *dev)
1703 smctr_init_shared_memory(dev); 1705 smctr_init_shared_memory(dev);
1704 1706
1705 if((err = smctr_issue_init_timers_cmd(dev))) 1707 if((err = smctr_issue_init_timers_cmd(dev)))
1706 return (err); 1708 return err;
1707 1709
1708 if((err = smctr_issue_init_txrx_cmd(dev))) 1710 if((err = smctr_issue_init_txrx_cmd(dev)))
1709 { 1711 {
1710 printk(KERN_ERR "%s: Hardware failure\n", dev->name); 1712 printk(KERN_ERR "%s: Hardware failure\n", dev->name);
1711 return (err); 1713 return err;
1712 } 1714 }
1713 1715
1714 return (0); 1716 return 0;
1715} 1717}
1716 1718
1717static int smctr_init_rx_bdbs(struct net_device *dev) 1719static int smctr_init_rx_bdbs(struct net_device *dev)
@@ -1763,7 +1765,7 @@ static int smctr_init_rx_bdbs(struct net_device *dev)
1763 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr; 1765 tp->rx_bdb_curr[i] = tp->rx_bdb_head[i]->next_ptr;
1764 } 1766 }
1765 1767
1766 return (0); 1768 return 0;
1767} 1769}
1768 1770
1769static int smctr_init_rx_fcbs(struct net_device *dev) 1771static int smctr_init_rx_fcbs(struct net_device *dev)
@@ -1813,7 +1815,7 @@ static int smctr_init_rx_fcbs(struct net_device *dev)
1813 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr; 1815 tp->rx_fcb_curr[i] = tp->rx_fcb_head[i]->next_ptr;
1814 } 1816 }
1815 1817
1816 return(0); 1818 return 0;
1817} 1819}
1818 1820
1819static int smctr_init_shared_memory(struct net_device *dev) 1821static int smctr_init_shared_memory(struct net_device *dev)
@@ -1871,7 +1873,7 @@ static int smctr_init_shared_memory(struct net_device *dev)
1871 smctr_init_rx_bdbs(dev); 1873 smctr_init_rx_bdbs(dev);
1872 smctr_init_rx_fcbs(dev); 1874 smctr_init_rx_fcbs(dev);
1873 1875
1874 return (0); 1876 return 0;
1875} 1877}
1876 1878
1877static int smctr_init_tx_bdbs(struct net_device *dev) 1879static int smctr_init_tx_bdbs(struct net_device *dev)
@@ -1901,7 +1903,7 @@ static int smctr_init_tx_bdbs(struct net_device *dev)
1901 tp->tx_bdb_head[i]->back_ptr = bdb; 1903 tp->tx_bdb_head[i]->back_ptr = bdb;
1902 } 1904 }
1903 1905
1904 return (0); 1906 return 0;
1905} 1907}
1906 1908
1907static int smctr_init_tx_fcbs(struct net_device *dev) 1909static int smctr_init_tx_fcbs(struct net_device *dev)
@@ -1940,7 +1942,7 @@ static int smctr_init_tx_fcbs(struct net_device *dev)
1940 tp->num_tx_fcbs_used[i] = 0; 1942 tp->num_tx_fcbs_used[i] = 0;
1941 } 1943 }
1942 1944
1943 return (0); 1945 return 0;
1944} 1946}
1945 1947
1946static int smctr_internal_self_test(struct net_device *dev) 1948static int smctr_internal_self_test(struct net_device *dev)
@@ -1949,33 +1951,33 @@ static int smctr_internal_self_test(struct net_device *dev)
1949 int err; 1951 int err;
1950 1952
1951 if((err = smctr_issue_test_internal_rom_cmd(dev))) 1953 if((err = smctr_issue_test_internal_rom_cmd(dev)))
1952 return (err); 1954 return err;
1953 1955
1954 if((err = smctr_wait_cmd(dev))) 1956 if((err = smctr_wait_cmd(dev)))
1955 return (err); 1957 return err;
1956 1958
1957 if(tp->acb_head->cmd_done_status & 0xff) 1959 if(tp->acb_head->cmd_done_status & 0xff)
1958 return (-1); 1960 return -1;
1959 1961
1960 if((err = smctr_issue_test_hic_cmd(dev))) 1962 if((err = smctr_issue_test_hic_cmd(dev)))
1961 return (err); 1963 return err;
1962 1964
1963 if((err = smctr_wait_cmd(dev))) 1965 if((err = smctr_wait_cmd(dev)))
1964 return (err); 1966 return err;
1965 1967
1966 if(tp->acb_head->cmd_done_status & 0xff) 1968 if(tp->acb_head->cmd_done_status & 0xff)
1967 return (-1); 1969 return -1;
1968 1970
1969 if((err = smctr_issue_test_mac_reg_cmd(dev))) 1971 if((err = smctr_issue_test_mac_reg_cmd(dev)))
1970 return (err); 1972 return err;
1971 1973
1972 if((err = smctr_wait_cmd(dev))) 1974 if((err = smctr_wait_cmd(dev)))
1973 return (err); 1975 return err;
1974 1976
1975 if(tp->acb_head->cmd_done_status & 0xff) 1977 if(tp->acb_head->cmd_done_status & 0xff)
1976 return (-1); 1978 return -1;
1977 1979
1978 return (0); 1980 return 0;
1979} 1981}
1980 1982
1981/* 1983/*
@@ -2468,14 +2470,14 @@ static int smctr_issue_enable_int_cmd(struct net_device *dev,
2468 int err; 2470 int err;
2469 2471
2470 if((err = smctr_wait_while_cbusy(dev))) 2472 if((err = smctr_wait_while_cbusy(dev)))
2471 return (err); 2473 return err;
2472 2474
2473 tp->sclb_ptr->int_mask_control = interrupt_enable_mask; 2475 tp->sclb_ptr->int_mask_control = interrupt_enable_mask;
2474 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK; 2476 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_CLEAR_INTERRUPT_MASK;
2475 2477
2476 smctr_set_ctrl_attention(dev); 2478 smctr_set_ctrl_attention(dev);
2477 2479
2478 return (0); 2480 return 0;
2479} 2481}
2480 2482
2481static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits) 2483static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ibits)
@@ -2483,7 +2485,7 @@ static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ib
2483 struct net_local *tp = netdev_priv(dev); 2485 struct net_local *tp = netdev_priv(dev);
2484 2486
2485 if(smctr_wait_while_cbusy(dev)) 2487 if(smctr_wait_while_cbusy(dev))
2486 return (-1); 2488 return -1;
2487 2489
2488 tp->sclb_ptr->int_mask_control = ibits; 2490 tp->sclb_ptr->int_mask_control = ibits;
2489 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0; 2491 tp->sclb_ptr->iack_code = iack_code << 1; /* use the offset from base */ tp->sclb_ptr->resume_control = 0;
@@ -2491,7 +2493,7 @@ static int smctr_issue_int_ack(struct net_device *dev, __u16 iack_code, __u16 ib
2491 2493
2492 smctr_set_ctrl_attention(dev); 2494 smctr_set_ctrl_attention(dev);
2493 2495
2494 return (0); 2496 return 0;
2495} 2497}
2496 2498
2497static int smctr_issue_init_timers_cmd(struct net_device *dev) 2499static int smctr_issue_init_timers_cmd(struct net_device *dev)
@@ -2502,10 +2504,10 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
2502 __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data; 2504 __u16 *pTimer_Struc = (__u16 *)tp->misc_command_data;
2503 2505
2504 if((err = smctr_wait_while_cbusy(dev))) 2506 if((err = smctr_wait_while_cbusy(dev)))
2505 return (err); 2507 return err;
2506 2508
2507 if((err = smctr_wait_cmd(dev))) 2509 if((err = smctr_wait_cmd(dev)))
2508 return (err); 2510 return err;
2509 2511
2510 tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE; 2512 tp->config_word0 = THDREN | DMA_TRIGGER | USETPT | NO_AUTOREMOVE;
2511 tp->config_word1 = 0; 2513 tp->config_word1 = 0;
@@ -2648,7 +2650,7 @@ static int smctr_issue_init_timers_cmd(struct net_device *dev)
2648 2650
2649 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0); 2651 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TRC_TIMERS, 0);
2650 2652
2651 return (err); 2653 return err;
2652} 2654}
2653 2655
2654static int smctr_issue_init_txrx_cmd(struct net_device *dev) 2656static int smctr_issue_init_txrx_cmd(struct net_device *dev)
@@ -2659,12 +2661,12 @@ static int smctr_issue_init_txrx_cmd(struct net_device *dev)
2659 void **txrx_ptrs = (void *)tp->misc_command_data; 2661 void **txrx_ptrs = (void *)tp->misc_command_data;
2660 2662
2661 if((err = smctr_wait_while_cbusy(dev))) 2663 if((err = smctr_wait_while_cbusy(dev)))
2662 return (err); 2664 return err;
2663 2665
2664 if((err = smctr_wait_cmd(dev))) 2666 if((err = smctr_wait_cmd(dev)))
2665 { 2667 {
2666 printk(KERN_ERR "%s: Hardware failure\n", dev->name); 2668 printk(KERN_ERR "%s: Hardware failure\n", dev->name);
2667 return (err); 2669 return err;
2668 } 2670 }
2669 2671
2670 /* Initialize Transmit Queue Pointers that are used, to point to 2672 /* Initialize Transmit Queue Pointers that are used, to point to
@@ -2695,7 +2697,7 @@ static int smctr_issue_init_txrx_cmd(struct net_device *dev)
2695 2697
2696 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0); 2698 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_INIT_TX_RX, 0);
2697 2699
2698 return (err); 2700 return err;
2699} 2701}
2700 2702
2701static int smctr_issue_insert_cmd(struct net_device *dev) 2703static int smctr_issue_insert_cmd(struct net_device *dev)
@@ -2704,7 +2706,7 @@ static int smctr_issue_insert_cmd(struct net_device *dev)
2704 2706
2705 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP); 2707 err = smctr_setup_single_cmd(dev, ACB_CMD_INSERT, ACB_SUB_CMD_NOP);
2706 2708
2707 return (err); 2709 return err;
2708} 2710}
2709 2711
2710static int smctr_issue_read_ring_status_cmd(struct net_device *dev) 2712static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
@@ -2712,15 +2714,15 @@ static int smctr_issue_read_ring_status_cmd(struct net_device *dev)
2712 int err; 2714 int err;
2713 2715
2714 if((err = smctr_wait_while_cbusy(dev))) 2716 if((err = smctr_wait_while_cbusy(dev)))
2715 return (err); 2717 return err;
2716 2718
2717 if((err = smctr_wait_cmd(dev))) 2719 if((err = smctr_wait_cmd(dev)))
2718 return (err); 2720 return err;
2719 2721
2720 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS, 2722 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_READ_TRC_STATUS,
2721 RW_TRC_STATUS_BLOCK); 2723 RW_TRC_STATUS_BLOCK);
2722 2724
2723 return (err); 2725 return err;
2724} 2726}
2725 2727
2726static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt) 2728static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
@@ -2728,15 +2730,15 @@ static int smctr_issue_read_word_cmd(struct net_device *dev, __u16 aword_cnt)
2728 int err; 2730 int err;
2729 2731
2730 if((err = smctr_wait_while_cbusy(dev))) 2732 if((err = smctr_wait_while_cbusy(dev)))
2731 return (err); 2733 return err;
2732 2734
2733 if((err = smctr_wait_cmd(dev))) 2735 if((err = smctr_wait_cmd(dev)))
2734 return (err); 2736 return err;
2735 2737
2736 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE, 2738 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_READ_VALUE,
2737 aword_cnt); 2739 aword_cnt);
2738 2740
2739 return (err); 2741 return err;
2740} 2742}
2741 2743
2742static int smctr_issue_remove_cmd(struct net_device *dev) 2744static int smctr_issue_remove_cmd(struct net_device *dev)
@@ -2745,14 +2747,14 @@ static int smctr_issue_remove_cmd(struct net_device *dev)
2745 int err; 2747 int err;
2746 2748
2747 if((err = smctr_wait_while_cbusy(dev))) 2749 if((err = smctr_wait_while_cbusy(dev)))
2748 return (err); 2750 return err;
2749 2751
2750 tp->sclb_ptr->resume_control = 0; 2752 tp->sclb_ptr->resume_control = 0;
2751 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE; 2753 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_CMD_REMOVE;
2752 2754
2753 smctr_set_ctrl_attention(dev); 2755 smctr_set_ctrl_attention(dev);
2754 2756
2755 return (0); 2757 return 0;
2756} 2758}
2757 2759
2758static int smctr_issue_resume_acb_cmd(struct net_device *dev) 2760static int smctr_issue_resume_acb_cmd(struct net_device *dev)
@@ -2761,7 +2763,7 @@ static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2761 int err; 2763 int err;
2762 2764
2763 if((err = smctr_wait_while_cbusy(dev))) 2765 if((err = smctr_wait_while_cbusy(dev)))
2764 return (err); 2766 return err;
2765 2767
2766 tp->sclb_ptr->resume_control = SCLB_RC_ACB; 2768 tp->sclb_ptr->resume_control = SCLB_RC_ACB;
2767 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID; 2769 tp->sclb_ptr->valid_command = SCLB_VALID | SCLB_RESUME_CONTROL_VALID;
@@ -2770,7 +2772,7 @@ static int smctr_issue_resume_acb_cmd(struct net_device *dev)
2770 2772
2771 smctr_set_ctrl_attention(dev); 2773 smctr_set_ctrl_attention(dev);
2772 2774
2773 return (0); 2775 return 0;
2774} 2776}
2775 2777
2776static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue) 2778static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
@@ -2779,7 +2781,7 @@ static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2779 int err; 2781 int err;
2780 2782
2781 if((err = smctr_wait_while_cbusy(dev))) 2783 if((err = smctr_wait_while_cbusy(dev)))
2782 return (err); 2784 return err;
2783 2785
2784 if(queue == MAC_QUEUE) 2786 if(queue == MAC_QUEUE)
2785 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB; 2787 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_BDB;
@@ -2790,7 +2792,7 @@ static int smctr_issue_resume_rx_bdb_cmd(struct net_device *dev, __u16 queue)
2790 2792
2791 smctr_set_ctrl_attention(dev); 2793 smctr_set_ctrl_attention(dev);
2792 2794
2793 return (0); 2795 return 0;
2794} 2796}
2795 2797
2796static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue) 2798static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
@@ -2801,7 +2803,7 @@ static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2801 printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name); 2803 printk(KERN_DEBUG "%s: smctr_issue_resume_rx_fcb_cmd\n", dev->name);
2802 2804
2803 if(smctr_wait_while_cbusy(dev)) 2805 if(smctr_wait_while_cbusy(dev))
2804 return (-1); 2806 return -1;
2805 2807
2806 if(queue == MAC_QUEUE) 2808 if(queue == MAC_QUEUE)
2807 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB; 2809 tp->sclb_ptr->resume_control = SCLB_RC_RX_MAC_FCB;
@@ -2812,7 +2814,7 @@ static int smctr_issue_resume_rx_fcb_cmd(struct net_device *dev, __u16 queue)
2812 2814
2813 smctr_set_ctrl_attention(dev); 2815 smctr_set_ctrl_attention(dev);
2814 2816
2815 return (0); 2817 return 0;
2816} 2818}
2817 2819
2818static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue) 2820static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
@@ -2823,14 +2825,14 @@ static int smctr_issue_resume_tx_fcb_cmd(struct net_device *dev, __u16 queue)
2823 printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name); 2825 printk(KERN_DEBUG "%s: smctr_issue_resume_tx_fcb_cmd\n", dev->name);
2824 2826
2825 if(smctr_wait_while_cbusy(dev)) 2827 if(smctr_wait_while_cbusy(dev))
2826 return (-1); 2828 return -1;
2827 2829
2828 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue); 2830 tp->sclb_ptr->resume_control = (SCLB_RC_TFCB0 << queue);
2829 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID; 2831 tp->sclb_ptr->valid_command = SCLB_RESUME_CONTROL_VALID | SCLB_VALID;
2830 2832
2831 smctr_set_ctrl_attention(dev); 2833 smctr_set_ctrl_attention(dev);
2832 2834
2833 return (0); 2835 return 0;
2834} 2836}
2835 2837
2836static int smctr_issue_test_internal_rom_cmd(struct net_device *dev) 2838static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
@@ -2840,7 +2842,7 @@ static int smctr_issue_test_internal_rom_cmd(struct net_device *dev)
2840 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2842 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2841 TRC_INTERNAL_ROM_TEST); 2843 TRC_INTERNAL_ROM_TEST);
2842 2844
2843 return (err); 2845 return err;
2844} 2846}
2845 2847
2846static int smctr_issue_test_hic_cmd(struct net_device *dev) 2848static int smctr_issue_test_hic_cmd(struct net_device *dev)
@@ -2850,7 +2852,7 @@ static int smctr_issue_test_hic_cmd(struct net_device *dev)
2850 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST, 2852 err = smctr_setup_single_cmd(dev, ACB_CMD_HIC_TEST,
2851 TRC_HOST_INTERFACE_REG_TEST); 2853 TRC_HOST_INTERFACE_REG_TEST);
2852 2854
2853 return (err); 2855 return err;
2854} 2856}
2855 2857
2856static int smctr_issue_test_mac_reg_cmd(struct net_device *dev) 2858static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
@@ -2860,7 +2862,7 @@ static int smctr_issue_test_mac_reg_cmd(struct net_device *dev)
2860 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2862 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2861 TRC_MAC_REGISTERS_TEST); 2863 TRC_MAC_REGISTERS_TEST);
2862 2864
2863 return (err); 2865 return err;
2864} 2866}
2865 2867
2866static int smctr_issue_trc_loopback_cmd(struct net_device *dev) 2868static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
@@ -2870,7 +2872,7 @@ static int smctr_issue_trc_loopback_cmd(struct net_device *dev)
2870 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2872 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2871 TRC_INTERNAL_LOOPBACK); 2873 TRC_INTERNAL_LOOPBACK);
2872 2874
2873 return (err); 2875 return err;
2874} 2876}
2875 2877
2876static int smctr_issue_tri_loopback_cmd(struct net_device *dev) 2878static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
@@ -2880,7 +2882,7 @@ static int smctr_issue_tri_loopback_cmd(struct net_device *dev)
2880 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 2882 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
2881 TRC_TRI_LOOPBACK); 2883 TRC_TRI_LOOPBACK);
2882 2884
2883 return (err); 2885 return err;
2884} 2886}
2885 2887
2886static int smctr_issue_write_byte_cmd(struct net_device *dev, 2888static int smctr_issue_write_byte_cmd(struct net_device *dev,
@@ -2891,10 +2893,10 @@ static int smctr_issue_write_byte_cmd(struct net_device *dev,
2891 int err; 2893 int err;
2892 2894
2893 if((err = smctr_wait_while_cbusy(dev))) 2895 if((err = smctr_wait_while_cbusy(dev)))
2894 return (err); 2896 return err;
2895 2897
2896 if((err = smctr_wait_cmd(dev))) 2898 if((err = smctr_wait_cmd(dev)))
2897 return (err); 2899 return err;
2898 2900
2899 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff); 2901 for(iword = 0, ibyte = 0; iword < (unsigned int)(aword_cnt & 0xff);
2900 iword++, ibyte += 2) 2902 iword++, ibyte += 2)
@@ -2903,8 +2905,8 @@ static int smctr_issue_write_byte_cmd(struct net_device *dev,
2903 | (*((__u8 *)byte + ibyte + 1)); 2905 | (*((__u8 *)byte + ibyte + 1));
2904 } 2906 }
2905 2907
2906 return (smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, 2908 return smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2907 aword_cnt)); 2909 aword_cnt);
2908} 2910}
2909 2911
2910static int smctr_issue_write_word_cmd(struct net_device *dev, 2912static int smctr_issue_write_word_cmd(struct net_device *dev,
@@ -2914,10 +2916,10 @@ static int smctr_issue_write_word_cmd(struct net_device *dev,
2914 unsigned int i, err; 2916 unsigned int i, err;
2915 2917
2916 if((err = smctr_wait_while_cbusy(dev))) 2918 if((err = smctr_wait_while_cbusy(dev)))
2917 return (err); 2919 return err;
2918 2920
2919 if((err = smctr_wait_cmd(dev))) 2921 if((err = smctr_wait_cmd(dev)))
2920 return (err); 2922 return err;
2921 2923
2922 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++) 2924 for(i = 0; i < (unsigned int)(aword_cnt & 0xff); i++)
2923 tp->misc_command_data[i] = *((__u16 *)word + i); 2925 tp->misc_command_data[i] = *((__u16 *)word + i);
@@ -2925,7 +2927,7 @@ static int smctr_issue_write_word_cmd(struct net_device *dev,
2925 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE, 2927 err = smctr_setup_single_cmd_w_data(dev, ACB_CMD_MCT_WRITE_VALUE,
2926 aword_cnt); 2928 aword_cnt);
2927 2929
2928 return (err); 2930 return err;
2929} 2931}
2930 2932
2931static int smctr_join_complete_state(struct net_device *dev) 2933static int smctr_join_complete_state(struct net_device *dev)
@@ -2935,7 +2937,7 @@ static int smctr_join_complete_state(struct net_device *dev)
2935 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, 2937 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
2936 JS_JOIN_COMPLETE_STATE); 2938 JS_JOIN_COMPLETE_STATE);
2937 2939
2938 return (err); 2940 return err;
2939} 2941}
2940 2942
2941static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev) 2943static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
@@ -2959,7 +2961,7 @@ static int smctr_link_tx_fcbs_to_bdbs(struct net_device *dev)
2959 } 2961 }
2960 } 2962 }
2961 2963
2962 return (0); 2964 return 0;
2963} 2965}
2964 2966
2965static int smctr_load_firmware(struct net_device *dev) 2967static int smctr_load_firmware(struct net_device *dev)
@@ -2974,7 +2976,7 @@ static int smctr_load_firmware(struct net_device *dev)
2974 2976
2975 if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) { 2977 if (request_firmware(&fw, "tr_smctr.bin", &dev->dev)) {
2976 printk(KERN_ERR "%s: firmware not found\n", dev->name); 2978 printk(KERN_ERR "%s: firmware not found\n", dev->name);
2977 return (UCODE_NOT_PRESENT); 2979 return UCODE_NOT_PRESENT;
2978 } 2980 }
2979 2981
2980 tp->num_of_tx_buffs = 4; 2982 tp->num_of_tx_buffs = 4;
@@ -3036,7 +3038,7 @@ static int smctr_load_firmware(struct net_device *dev)
3036 smctr_disable_16bit(dev); 3038 smctr_disable_16bit(dev);
3037 out: 3039 out:
3038 release_firmware(fw); 3040 release_firmware(fw);
3039 return (err); 3041 return err;
3040} 3042}
3041 3043
3042static int smctr_load_node_addr(struct net_device *dev) 3044static int smctr_load_node_addr(struct net_device *dev)
@@ -3052,7 +3054,7 @@ static int smctr_load_node_addr(struct net_device *dev)
3052 } 3054 }
3053 dev->addr_len = 6; 3055 dev->addr_len = 6;
3054 3056
3055 return (0); 3057 return 0;
3056} 3058}
3057 3059
3058/* Lobe Media Test. 3060/* Lobe Media Test.
@@ -3146,14 +3148,14 @@ static int smctr_lobe_media_test_cmd(struct net_device *dev)
3146 if(smctr_wait_cmd(dev)) 3148 if(smctr_wait_cmd(dev))
3147 { 3149 {
3148 printk(KERN_ERR "Lobe Failed test state\n"); 3150 printk(KERN_ERR "Lobe Failed test state\n");
3149 return (LOBE_MEDIA_TEST_FAILED); 3151 return LOBE_MEDIA_TEST_FAILED;
3150 } 3152 }
3151 } 3153 }
3152 3154
3153 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST, 3155 err = smctr_setup_single_cmd(dev, ACB_CMD_MCT_TEST,
3154 TRC_LOBE_MEDIA_TEST); 3156 TRC_LOBE_MEDIA_TEST);
3155 3157
3156 return (err); 3158 return err;
3157} 3159}
3158 3160
3159static int smctr_lobe_media_test_state(struct net_device *dev) 3161static int smctr_lobe_media_test_state(struct net_device *dev)
@@ -3163,7 +3165,7 @@ static int smctr_lobe_media_test_state(struct net_device *dev)
3163 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE, 3165 err = smctr_setup_single_cmd(dev, ACB_CMD_CHANGE_JOIN_STATE,
3164 JS_LOBE_TEST_STATE); 3166 JS_LOBE_TEST_STATE);
3165 3167
3166 return (err); 3168 return err;
3167} 3169}
3168 3170
3169static int smctr_make_8025_hdr(struct net_device *dev, 3171static int smctr_make_8025_hdr(struct net_device *dev,
@@ -3212,7 +3214,7 @@ static int smctr_make_8025_hdr(struct net_device *dev,
3212 break; 3214 break;
3213 } 3215 }
3214 3216
3215 return (0); 3217 return 0;
3216} 3218}
3217 3219
3218static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3220static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3225,7 +3227,7 @@ static int smctr_make_access_pri(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3225 tsv->svv[0] = MSB(tp->authorized_access_priority); 3227 tsv->svv[0] = MSB(tp->authorized_access_priority);
3226 tsv->svv[1] = LSB(tp->authorized_access_priority); 3228 tsv->svv[1] = LSB(tp->authorized_access_priority);
3227 3229
3228 return (0); 3230 return 0;
3229} 3231}
3230 3232
3231static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3233static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3236,7 +3238,7 @@ static int smctr_make_addr_mod(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3236 tsv->svv[0] = 0; 3238 tsv->svv[0] = 0;
3237 tsv->svv[1] = 0; 3239 tsv->svv[1] = 0;
3238 3240
3239 return (0); 3241 return 0;
3240} 3242}
3241 3243
3242static int smctr_make_auth_funct_class(struct net_device *dev, 3244static int smctr_make_auth_funct_class(struct net_device *dev,
@@ -3250,7 +3252,7 @@ static int smctr_make_auth_funct_class(struct net_device *dev,
3250 tsv->svv[0] = MSB(tp->authorized_function_classes); 3252 tsv->svv[0] = MSB(tp->authorized_function_classes);
3251 tsv->svv[1] = LSB(tp->authorized_function_classes); 3253 tsv->svv[1] = LSB(tp->authorized_function_classes);
3252 3254
3253 return (0); 3255 return 0;
3254} 3256}
3255 3257
3256static int smctr_make_corr(struct net_device *dev, 3258static int smctr_make_corr(struct net_device *dev,
@@ -3262,7 +3264,7 @@ static int smctr_make_corr(struct net_device *dev,
3262 tsv->svv[0] = MSB(correlator); 3264 tsv->svv[0] = MSB(correlator);
3263 tsv->svv[1] = LSB(correlator); 3265 tsv->svv[1] = LSB(correlator);
3264 3266
3265 return (0); 3267 return 0;
3266} 3268}
3267 3269
3268static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3270static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3280,7 +3282,7 @@ static int smctr_make_funct_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3280 tsv->svv[2] = MSB(tp->misc_command_data[1]); 3282 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3281 tsv->svv[3] = LSB(tp->misc_command_data[1]); 3283 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3282 3284
3283 return (0); 3285 return 0;
3284} 3286}
3285 3287
3286static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3288static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3305,7 +3307,7 @@ static int smctr_make_group_addr(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3305 tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00) 3307 tsv->svv[2] == 0x00 && tsv->svv[3] == 0x00)
3306 tsv->svv[0] = 0x00; 3308 tsv->svv[0] = 0x00;
3307 3309
3308 return (0); 3310 return 0;
3309} 3311}
3310 3312
3311static int smctr_make_phy_drop_num(struct net_device *dev, 3313static int smctr_make_phy_drop_num(struct net_device *dev,
@@ -3324,7 +3326,7 @@ static int smctr_make_phy_drop_num(struct net_device *dev,
3324 tsv->svv[2] = MSB(tp->misc_command_data[1]); 3326 tsv->svv[2] = MSB(tp->misc_command_data[1]);
3325 tsv->svv[3] = LSB(tp->misc_command_data[1]); 3327 tsv->svv[3] = LSB(tp->misc_command_data[1]);
3326 3328
3327 return (0); 3329 return 0;
3328} 3330}
3329 3331
3330static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3332static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3337,7 +3339,7 @@ static int smctr_make_product_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3337 for(i = 0; i < 18; i++) 3339 for(i = 0; i < 18; i++)
3338 tsv->svv[i] = 0xF0; 3340 tsv->svv[i] = 0xF0;
3339 3341
3340 return (0); 3342 return 0;
3341} 3343}
3342 3344
3343static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3345static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3358,7 +3360,7 @@ static int smctr_make_station_id(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3358 tsv->svv[4] = MSB(tp->misc_command_data[2]); 3360 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3359 tsv->svv[5] = LSB(tp->misc_command_data[2]); 3361 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3360 3362
3361 return (0); 3363 return 0;
3362} 3364}
3363 3365
3364static int smctr_make_ring_station_status(struct net_device *dev, 3366static int smctr_make_ring_station_status(struct net_device *dev,
@@ -3374,7 +3376,7 @@ static int smctr_make_ring_station_status(struct net_device *dev,
3374 tsv->svv[4] = 0; 3376 tsv->svv[4] = 0;
3375 tsv->svv[5] = 0; 3377 tsv->svv[5] = 0;
3376 3378
3377 return (0); 3379 return 0;
3378} 3380}
3379 3381
3380static int smctr_make_ring_station_version(struct net_device *dev, 3382static int smctr_make_ring_station_version(struct net_device *dev,
@@ -3400,7 +3402,7 @@ static int smctr_make_ring_station_version(struct net_device *dev,
3400 else 3402 else
3401 tsv->svv[9] = 0xc4; /* EBCDIC - D */ 3403 tsv->svv[9] = 0xc4; /* EBCDIC - D */
3402 3404
3403 return (0); 3405 return 0;
3404} 3406}
3405 3407
3406static int smctr_make_tx_status_code(struct net_device *dev, 3408static int smctr_make_tx_status_code(struct net_device *dev,
@@ -3414,7 +3416,7 @@ static int smctr_make_tx_status_code(struct net_device *dev,
3414 /* Stripped frame status of Transmitted Frame */ 3416 /* Stripped frame status of Transmitted Frame */
3415 tsv->svv[1] = tx_fstatus & 0xff; 3417 tsv->svv[1] = tx_fstatus & 0xff;
3416 3418
3417 return (0); 3419 return 0;
3418} 3420}
3419 3421
3420static int smctr_make_upstream_neighbor_addr(struct net_device *dev, 3422static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
@@ -3436,7 +3438,7 @@ static int smctr_make_upstream_neighbor_addr(struct net_device *dev,
3436 tsv->svv[4] = MSB(tp->misc_command_data[2]); 3438 tsv->svv[4] = MSB(tp->misc_command_data[2]);
3437 tsv->svv[5] = LSB(tp->misc_command_data[2]); 3439 tsv->svv[5] = LSB(tp->misc_command_data[2]);
3438 3440
3439 return (0); 3441 return 0;
3440} 3442}
3441 3443
3442static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv) 3444static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
@@ -3444,7 +3446,7 @@ static int smctr_make_wrap_data(struct net_device *dev, MAC_SUB_VECTOR *tsv)
3444 tsv->svi = WRAP_DATA; 3446 tsv->svi = WRAP_DATA;
3445 tsv->svl = S_WRAP_DATA; 3447 tsv->svl = S_WRAP_DATA;
3446 3448
3447 return (0); 3449 return 0;
3448} 3450}
3449 3451
3450/* 3452/*
@@ -3464,9 +3466,9 @@ static int smctr_open(struct net_device *dev)
3464 3466
3465 err = smctr_init_adapter(dev); 3467 err = smctr_init_adapter(dev);
3466 if(err < 0) 3468 if(err < 0)
3467 return (err); 3469 return err;
3468 3470
3469 return (err); 3471 return err;
3470} 3472}
3471 3473
3472/* Interrupt driven open of Token card. */ 3474/* Interrupt driven open of Token card. */
@@ -3481,9 +3483,9 @@ static int smctr_open_tr(struct net_device *dev)
3481 3483
3482 /* Now we can actually open the adapter. */ 3484 /* Now we can actually open the adapter. */
3483 if(tp->status == OPEN) 3485 if(tp->status == OPEN)
3484 return (0); 3486 return 0;
3485 if(tp->status != INITIALIZED) 3487 if(tp->status != INITIALIZED)
3486 return (-1); 3488 return -1;
3487 3489
3488 /* FIXME: it would work a lot better if we masked the irq sources 3490 /* FIXME: it would work a lot better if we masked the irq sources
3489 on the card here, then we could skip the locking and poll nicely */ 3491 on the card here, then we could skip the locking and poll nicely */
@@ -3560,7 +3562,7 @@ static int smctr_open_tr(struct net_device *dev)
3560out: 3562out:
3561 spin_unlock_irqrestore(&tp->lock, flags); 3563 spin_unlock_irqrestore(&tp->lock, flags);
3562 3564
3563 return (err); 3565 return err;
3564} 3566}
3565 3567
3566/* Check for a network adapter of this type, 3568/* Check for a network adapter of this type,
@@ -3675,7 +3677,7 @@ static int __init smctr_probe1(struct net_device *dev, int ioaddr)
3675 3677
3676 dev->netdev_ops = &smctr_netdev_ops; 3678 dev->netdev_ops = &smctr_netdev_ops;
3677 dev->watchdog_timeo = HZ; 3679 dev->watchdog_timeo = HZ;
3678 return (0); 3680 return 0;
3679 3681
3680out: 3682out:
3681 return err; 3683 return err;
@@ -3699,13 +3701,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3699 case INIT: 3701 case INIT:
3700 if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED) 3702 if((rcode = smctr_rcv_init(dev, rmf, &correlator)) == HARDWARE_FAILED)
3701 { 3703 {
3702 return (rcode); 3704 return rcode;
3703 } 3705 }
3704 3706
3705 if((err = smctr_send_rsp(dev, rmf, rcode, 3707 if((err = smctr_send_rsp(dev, rmf, rcode,
3706 correlator))) 3708 correlator)))
3707 { 3709 {
3708 return (err); 3710 return err;
3709 } 3711 }
3710 break; 3712 break;
3711 3713
@@ -3713,13 +3715,13 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3713 if((rcode = smctr_rcv_chg_param(dev, rmf, 3715 if((rcode = smctr_rcv_chg_param(dev, rmf,
3714 &correlator)) ==HARDWARE_FAILED) 3716 &correlator)) ==HARDWARE_FAILED)
3715 { 3717 {
3716 return (rcode); 3718 return rcode;
3717 } 3719 }
3718 3720
3719 if((err = smctr_send_rsp(dev, rmf, rcode, 3721 if((err = smctr_send_rsp(dev, rmf, rcode,
3720 correlator))) 3722 correlator)))
3721 { 3723 {
3722 return (err); 3724 return err;
3723 } 3725 }
3724 break; 3726 break;
3725 3727
@@ -3728,16 +3730,16 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3728 rmf, &correlator)) != POSITIVE_ACK) 3730 rmf, &correlator)) != POSITIVE_ACK)
3729 { 3731 {
3730 if(rcode == HARDWARE_FAILED) 3732 if(rcode == HARDWARE_FAILED)
3731 return (rcode); 3733 return rcode;
3732 else 3734 else
3733 return (smctr_send_rsp(dev, rmf, 3735 return smctr_send_rsp(dev, rmf,
3734 rcode, correlator)); 3736 rcode, correlator);
3735 } 3737 }
3736 3738
3737 if((err = smctr_send_rpt_addr(dev, rmf, 3739 if((err = smctr_send_rpt_addr(dev, rmf,
3738 correlator))) 3740 correlator)))
3739 { 3741 {
3740 return (err); 3742 return err;
3741 } 3743 }
3742 break; 3744 break;
3743 3745
@@ -3746,17 +3748,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3746 rmf, &correlator)) != POSITIVE_ACK) 3748 rmf, &correlator)) != POSITIVE_ACK)
3747 { 3749 {
3748 if(rcode == HARDWARE_FAILED) 3750 if(rcode == HARDWARE_FAILED)
3749 return (rcode); 3751 return rcode;
3750 else 3752 else
3751 return (smctr_send_rsp(dev, rmf, 3753 return smctr_send_rsp(dev, rmf,
3752 rcode, 3754 rcode,
3753 correlator)); 3755 correlator);
3754 } 3756 }
3755 3757
3756 if((err = smctr_send_rpt_attch(dev, rmf, 3758 if((err = smctr_send_rpt_attch(dev, rmf,
3757 correlator))) 3759 correlator)))
3758 { 3760 {
3759 return (err); 3761 return err;
3760 } 3762 }
3761 break; 3763 break;
3762 3764
@@ -3765,17 +3767,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3765 rmf, &correlator)) != POSITIVE_ACK) 3767 rmf, &correlator)) != POSITIVE_ACK)
3766 { 3768 {
3767 if(rcode == HARDWARE_FAILED) 3769 if(rcode == HARDWARE_FAILED)
3768 return (rcode); 3770 return rcode;
3769 else 3771 else
3770 return (smctr_send_rsp(dev, rmf, 3772 return smctr_send_rsp(dev, rmf,
3771 rcode, 3773 rcode,
3772 correlator)); 3774 correlator);
3773 } 3775 }
3774 3776
3775 if((err = smctr_send_rpt_state(dev, rmf, 3777 if((err = smctr_send_rpt_state(dev, rmf,
3776 correlator))) 3778 correlator)))
3777 { 3779 {
3778 return (err); 3780 return err;
3779 } 3781 }
3780 break; 3782 break;
3781 3783
@@ -3786,17 +3788,17 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3786 != POSITIVE_ACK) 3788 != POSITIVE_ACK)
3787 { 3789 {
3788 if(rcode == HARDWARE_FAILED) 3790 if(rcode == HARDWARE_FAILED)
3789 return (rcode); 3791 return rcode;
3790 else 3792 else
3791 return (smctr_send_rsp(dev, rmf, 3793 return smctr_send_rsp(dev, rmf,
3792 rcode, 3794 rcode,
3793 correlator)); 3795 correlator);
3794 } 3796 }
3795 3797
3796 if((err = smctr_send_tx_forward(dev, rmf, 3798 if((err = smctr_send_tx_forward(dev, rmf,
3797 &tx_fstatus)) == HARDWARE_FAILED) 3799 &tx_fstatus)) == HARDWARE_FAILED)
3798 { 3800 {
3799 return (err); 3801 return err;
3800 } 3802 }
3801 3803
3802 if(err == A_FRAME_WAS_FORWARDED) 3804 if(err == A_FRAME_WAS_FORWARDED)
@@ -3805,7 +3807,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3805 rmf, tx_fstatus)) 3807 rmf, tx_fstatus))
3806 == HARDWARE_FAILED) 3808 == HARDWARE_FAILED)
3807 { 3809 {
3808 return (err); 3810 return err;
3809 } 3811 }
3810 } 3812 }
3811 break; 3813 break;
@@ -3834,7 +3836,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3834 if((err = smctr_send_rsp(dev, rmf,rcode, 3836 if((err = smctr_send_rsp(dev, rmf,rcode,
3835 correlator))) 3837 correlator)))
3836 { 3838 {
3837 return (err); 3839 return err;
3838 } 3840 }
3839 } 3841 }
3840 3842
@@ -3899,7 +3901,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3899 err = 0; 3901 err = 0;
3900 } 3902 }
3901 3903
3902 return (err); 3904 return err;
3903} 3905}
3904 3906
3905/* Adapter RAM test. Incremental word ODD boundary data test. */ 3907/* Adapter RAM test. Incremental word ODD boundary data test. */
@@ -3942,7 +3944,7 @@ static int smctr_ram_memory_test(struct net_device *dev)
3942 err_offset = j; 3944 err_offset = j;
3943 err_word = word_read; 3945 err_word = word_read;
3944 err_pattern = word_pattern; 3946 err_pattern = word_pattern;
3945 return (RAM_TEST_FAILED); 3947 return RAM_TEST_FAILED;
3946 } 3948 }
3947 } 3949 }
3948 } 3950 }
@@ -3966,14 +3968,14 @@ static int smctr_ram_memory_test(struct net_device *dev)
3966 err_offset = j; 3968 err_offset = j;
3967 err_word = word_read; 3969 err_word = word_read;
3968 err_pattern = word_pattern; 3970 err_pattern = word_pattern;
3969 return (RAM_TEST_FAILED); 3971 return RAM_TEST_FAILED;
3970 } 3972 }
3971 } 3973 }
3972 } 3974 }
3973 3975
3974 smctr_set_page(dev, (__u8 *)tp->ram_access); 3976 smctr_set_page(dev, (__u8 *)tp->ram_access);
3975 3977
3976 return (0); 3978 return 0;
3977} 3979}
3978 3980
3979static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf, 3981static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
@@ -3986,7 +3988,7 @@ static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
3986 3988
3987 /* This Frame can only come from a CRS */ 3989 /* This Frame can only come from a CRS */
3988 if((rmf->dc_sc & SC_MASK) != SC_CRS) 3990 if((rmf->dc_sc & SC_MASK) != SC_CRS)
3989 return(E_INAPPROPRIATE_SOURCE_CLASS); 3991 return E_INAPPROPRIATE_SOURCE_CLASS;
3990 3992
3991 /* Remove MVID Length from total length. */ 3993 /* Remove MVID Length from total length. */
3992 vlen = (signed short)rmf->vl - 4; 3994 vlen = (signed short)rmf->vl - 4;
@@ -4058,7 +4060,7 @@ static int smctr_rcv_chg_param(struct net_device *dev, MAC_HEADER *rmf,
4058 } 4060 }
4059 } 4061 }
4060 4062
4061 return (rcode); 4063 return rcode;
4062} 4064}
4063 4065
4064static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf, 4066static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
@@ -4071,7 +4073,7 @@ static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
4071 4073
4072 /* This Frame can only come from a RPS */ 4074 /* This Frame can only come from a RPS */
4073 if((rmf->dc_sc & SC_MASK) != SC_RPS) 4075 if((rmf->dc_sc & SC_MASK) != SC_RPS)
4074 return (E_INAPPROPRIATE_SOURCE_CLASS); 4076 return E_INAPPROPRIATE_SOURCE_CLASS;
4075 4077
4076 /* Remove MVID Length from total length. */ 4078 /* Remove MVID Length from total length. */
4077 vlen = (signed short)rmf->vl - 4; 4079 vlen = (signed short)rmf->vl - 4;
@@ -4133,7 +4135,7 @@ static int smctr_rcv_init(struct net_device *dev, MAC_HEADER *rmf,
4133 } 4135 }
4134 } 4136 }
4135 4137
4136 return (rcode); 4138 return rcode;
4137} 4139}
4138 4140
4139static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf) 4141static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
@@ -4145,7 +4147,7 @@ static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
4145 4147
4146 /* This Frame can only come from a CRS */ 4148 /* This Frame can only come from a CRS */
4147 if((rmf->dc_sc & SC_MASK) != SC_CRS) 4149 if((rmf->dc_sc & SC_MASK) != SC_CRS)
4148 return (E_INAPPROPRIATE_SOURCE_CLASS); 4150 return E_INAPPROPRIATE_SOURCE_CLASS;
4149 4151
4150 /* Remove MVID Length from total length */ 4152 /* Remove MVID Length from total length */
4151 vlen = (signed short)rmf->vl - 4; 4153 vlen = (signed short)rmf->vl - 4;
@@ -4193,7 +4195,7 @@ static int smctr_rcv_tx_forward(struct net_device *dev, MAC_HEADER *rmf)
4193 } 4195 }
4194 } 4196 }
4195 4197
4196 return (rcode); 4198 return rcode;
4197} 4199}
4198 4200
4199static int smctr_rcv_rq_addr_state_attch(struct net_device *dev, 4201static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
@@ -4250,7 +4252,7 @@ static int smctr_rcv_rq_addr_state_attch(struct net_device *dev,
4250 } 4252 }
4251 } 4253 }
4252 4254
4253 return (rcode); 4255 return rcode;
4254} 4256}
4255 4257
4256static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf, 4258static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
@@ -4284,7 +4286,7 @@ static int smctr_rcv_unknown(struct net_device *dev, MAC_HEADER *rmf,
4284 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl); 4286 rsv = (MAC_SUB_VECTOR *)((__u32)rsv + rsv->svl);
4285 } 4287 }
4286 4288
4287 return (E_UNRECOGNIZED_VECTOR_ID); 4289 return E_UNRECOGNIZED_VECTOR_ID;
4288} 4290}
4289 4291
4290/* 4292/*
@@ -4311,7 +4313,7 @@ static int smctr_reset_adapter(struct net_device *dev)
4311 */ 4313 */
4312 outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR); 4314 outb(tp->trc_mask | CSR_CLRTINT | CSR_CLRCBUSY, ioaddr + CSR);
4313 4315
4314 return (0); 4316 return 0;
4315} 4317}
4316 4318
4317static int smctr_restart_tx_chain(struct net_device *dev, short queue) 4319static int smctr_restart_tx_chain(struct net_device *dev, short queue)
@@ -4329,7 +4331,7 @@ static int smctr_restart_tx_chain(struct net_device *dev, short queue)
4329 err = smctr_issue_resume_tx_fcb_cmd(dev, queue); 4331 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
4330 } 4332 }
4331 4333
4332 return (err); 4334 return err;
4333} 4335}
4334 4336
4335static int smctr_ring_status_chg(struct net_device *dev) 4337static int smctr_ring_status_chg(struct net_device *dev)
@@ -4371,7 +4373,7 @@ static int smctr_ring_status_chg(struct net_device *dev)
4371 } 4373 }
4372 4374
4373 if(!(tp->ring_status_flags & RING_STATUS_CHANGED)) 4375 if(!(tp->ring_status_flags & RING_STATUS_CHANGED))
4374 return (0); 4376 return 0;
4375 4377
4376 switch(tp->ring_status) 4378 switch(tp->ring_status)
4377 { 4379 {
@@ -4421,7 +4423,7 @@ static int smctr_ring_status_chg(struct net_device *dev)
4421 break; 4423 break;
4422 } 4424 }
4423 4425
4424 return (0); 4426 return 0;
4425} 4427}
4426 4428
4427static int smctr_rx_frame(struct net_device *dev) 4429static int smctr_rx_frame(struct net_device *dev)
@@ -4486,7 +4488,7 @@ static int smctr_rx_frame(struct net_device *dev)
4486 break; 4488 break;
4487 } 4489 }
4488 4490
4489 return (err); 4491 return err;
4490} 4492}
4491 4493
4492static int smctr_send_dat(struct net_device *dev) 4494static int smctr_send_dat(struct net_device *dev)
@@ -4502,7 +4504,7 @@ static int smctr_send_dat(struct net_device *dev)
4502 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 4504 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE,
4503 sizeof(MAC_HEADER))) == (FCBlock *)(-1L)) 4505 sizeof(MAC_HEADER))) == (FCBlock *)(-1L))
4504 { 4506 {
4505 return (OUT_OF_RESOURCES); 4507 return OUT_OF_RESOURCES;
4506 } 4508 }
4507 4509
4508 /* Initialize DAT Data Fields. */ 4510 /* Initialize DAT Data Fields. */
@@ -4524,7 +4526,7 @@ static int smctr_send_dat(struct net_device *dev)
4524 4526
4525 /* Start Transmit. */ 4527 /* Start Transmit. */
4526 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) 4528 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
4527 return (err); 4529 return err;
4528 4530
4529 /* Wait for Transmit to Complete */ 4531 /* Wait for Transmit to Complete */
4530 for(i = 0; i < 10000; i++) 4532 for(i = 0; i < 10000; i++)
@@ -4538,7 +4540,7 @@ static int smctr_send_dat(struct net_device *dev)
4538 if(!(fcb->frame_status & FCB_COMMAND_DONE) || 4540 if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
4539 fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) 4541 fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
4540 { 4542 {
4541 return (INITIALIZE_FAILED); 4543 return INITIALIZE_FAILED;
4542 } 4544 }
4543 4545
4544 /* De-allocated Tx FCB and Frame Buffer 4546 /* De-allocated Tx FCB and Frame Buffer
@@ -4549,7 +4551,7 @@ static int smctr_send_dat(struct net_device *dev)
4549 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; 4551 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
4550 smctr_update_tx_chain(dev, fcb, MAC_QUEUE); 4552 smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
4551 4553
4552 return (0); 4554 return 0;
4553} 4555}
4554 4556
4555static void smctr_timeout(struct net_device *dev) 4557static void smctr_timeout(struct net_device *dev)
@@ -4610,7 +4612,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
4610 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr) 4612 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(struct trh_hdr)
4611 + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L)) 4613 + S_WRAP_DATA + S_WRAP_DATA)) == (FCBlock *)(-1L))
4612 { 4614 {
4613 return (OUT_OF_RESOURCES); 4615 return OUT_OF_RESOURCES;
4614 } 4616 }
4615 4617
4616 /* Initialize DAT Data Fields. */ 4618 /* Initialize DAT Data Fields. */
@@ -4639,7 +4641,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
4639 /* Start Transmit. */ 4641 /* Start Transmit. */
4640 tmf->vl = SWAP_BYTES(tmf->vl); 4642 tmf->vl = SWAP_BYTES(tmf->vl);
4641 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) 4643 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
4642 return (err); 4644 return err;
4643 4645
4644 /* Wait for Transmit to Complete. (10 ms). */ 4646 /* Wait for Transmit to Complete. (10 ms). */
4645 for(i=0; i < 10000; i++) 4647 for(i=0; i < 10000; i++)
@@ -4653,7 +4655,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
4653 if(!(fcb->frame_status & FCB_COMMAND_DONE) || 4655 if(!(fcb->frame_status & FCB_COMMAND_DONE) ||
4654 fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS)) 4656 fcb->frame_status & (FCB_TX_STATUS_E | FCB_TX_AC_BITS))
4655 { 4657 {
4656 return (LOBE_MEDIA_TEST_FAILED); 4658 return LOBE_MEDIA_TEST_FAILED;
4657 } 4659 }
4658 4660
4659 /* De-allocated Tx FCB and Frame Buffer 4661 /* De-allocated Tx FCB and Frame Buffer
@@ -4664,7 +4666,7 @@ static int smctr_send_lobe_media_test(struct net_device *dev)
4664 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING; 4666 tp->tx_queue_status[MAC_QUEUE] = NOT_TRANSMITING;
4665 smctr_update_tx_chain(dev, fcb, MAC_QUEUE); 4667 smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
4666 4668
4667 return (0); 4669 return 0;
4668} 4670}
4669 4671
4670static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf, 4672static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
@@ -4679,7 +4681,7 @@ static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
4679 + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS)) 4681 + S_ADDRESS_MODIFER + S_GROUP_ADDRESS + S_FUNCTIONAL_ADDRESS))
4680 == (FCBlock *)(-1L)) 4682 == (FCBlock *)(-1L))
4681 { 4683 {
4682 return (0); 4684 return 0;
4683 } 4685 }
4684 4686
4685 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; 4687 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4722,7 +4724,7 @@ static int smctr_send_rpt_addr(struct net_device *dev, MAC_HEADER *rmf,
4722*/ 4724*/
4723 tmf->vl = SWAP_BYTES(tmf->vl); 4725 tmf->vl = SWAP_BYTES(tmf->vl);
4724 4726
4725 return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); 4727 return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
4726} 4728}
4727 4729
4728static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf, 4730static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
@@ -4737,7 +4739,7 @@ static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
4737 + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY)) 4739 + S_AUTHORIZED_FUNCTION_CLASS + S_AUTHORIZED_ACCESS_PRIORITY))
4738 == (FCBlock *)(-1L)) 4740 == (FCBlock *)(-1L))
4739 { 4741 {
4740 return (0); 4742 return 0;
4741 } 4743 }
4742 4744
4743 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; 4745 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4776,7 +4778,7 @@ static int smctr_send_rpt_attch(struct net_device *dev, MAC_HEADER *rmf,
4776*/ 4778*/
4777 tmf->vl = SWAP_BYTES(tmf->vl); 4779 tmf->vl = SWAP_BYTES(tmf->vl);
4778 4780
4779 return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); 4781 return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
4780} 4782}
4781 4783
4782static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf, 4784static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
@@ -4791,7 +4793,7 @@ static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
4791 + S_RING_STATION_STATUS + S_STATION_IDENTIFER)) 4793 + S_RING_STATION_STATUS + S_STATION_IDENTIFER))
4792 == (FCBlock *)(-1L)) 4794 == (FCBlock *)(-1L))
4793 { 4795 {
4794 return (0); 4796 return 0;
4795 } 4797 }
4796 4798
4797 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; 4799 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4826,7 +4828,7 @@ static int smctr_send_rpt_state(struct net_device *dev, MAC_HEADER *rmf,
4826*/ 4828*/
4827 tmf->vl = SWAP_BYTES(tmf->vl); 4829 tmf->vl = SWAP_BYTES(tmf->vl);
4828 4830
4829 return (smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); 4831 return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
4830} 4832}
4831 4833
4832static int smctr_send_rpt_tx_forward(struct net_device *dev, 4834static int smctr_send_rpt_tx_forward(struct net_device *dev,
@@ -4839,7 +4841,7 @@ static int smctr_send_rpt_tx_forward(struct net_device *dev,
4839 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) 4841 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
4840 + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L)) 4842 + S_TRANSMIT_STATUS_CODE)) == (FCBlock *)(-1L))
4841 { 4843 {
4842 return (0); 4844 return 0;
4843 } 4845 }
4844 4846
4845 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; 4847 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4862,7 +4864,7 @@ static int smctr_send_rpt_tx_forward(struct net_device *dev,
4862*/ 4864*/
4863 tmf->vl = SWAP_BYTES(tmf->vl); 4865 tmf->vl = SWAP_BYTES(tmf->vl);
4864 4866
4865 return(smctr_trc_send_packet(dev, fcb, MAC_QUEUE)); 4867 return smctr_trc_send_packet(dev, fcb, MAC_QUEUE);
4866} 4868}
4867 4869
4868static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf, 4870static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
@@ -4875,7 +4877,7 @@ static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
4875 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER) 4877 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, sizeof(MAC_HEADER)
4876 + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L)) 4878 + S_CORRELATOR + S_RESPONSE_CODE)) == (FCBlock *)(-1L))
4877 { 4879 {
4878 return (0); 4880 return 0;
4879 } 4881 }
4880 4882
4881 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; 4883 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4888,7 +4890,7 @@ static int smctr_send_rsp(struct net_device *dev, MAC_HEADER *rmf,
4888 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER)); 4890 tsv = (MAC_SUB_VECTOR *)((__u32)tmf + sizeof(MAC_HEADER));
4889 smctr_make_corr(dev, tsv, correlator); 4891 smctr_make_corr(dev, tsv, correlator);
4890 4892
4891 return (0); 4893 return 0;
4892} 4894}
4893 4895
4894static int smctr_send_rq_init(struct net_device *dev) 4896static int smctr_send_rq_init(struct net_device *dev)
@@ -4907,7 +4909,7 @@ static int smctr_send_rq_init(struct net_device *dev)
4907 + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER)) 4909 + S_RING_STATION_VERSION_NUMBER + S_ADDRESS_MODIFER))
4908 == (FCBlock *)(-1L))) 4910 == (FCBlock *)(-1L)))
4909 { 4911 {
4910 return (0); 4912 return 0;
4911 } 4913 }
4912 4914
4913 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr; 4915 tmf = (MAC_HEADER *)fcb->bdb_ptr->data_block_ptr;
@@ -4943,7 +4945,7 @@ static int smctr_send_rq_init(struct net_device *dev)
4943 tmf->vl = SWAP_BYTES(tmf->vl); 4945 tmf->vl = SWAP_BYTES(tmf->vl);
4944 4946
4945 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) 4947 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
4946 return (err); 4948 return err;
4947 4949
4948 /* Wait for Transmit to Complete */ 4950 /* Wait for Transmit to Complete */
4949 for(i = 0; i < 10000; i++) 4951 for(i = 0; i < 10000; i++)
@@ -4957,7 +4959,7 @@ static int smctr_send_rq_init(struct net_device *dev)
4957 fstatus = fcb->frame_status; 4959 fstatus = fcb->frame_status;
4958 4960
4959 if(!(fstatus & FCB_COMMAND_DONE)) 4961 if(!(fstatus & FCB_COMMAND_DONE))
4960 return (HARDWARE_FAILED); 4962 return HARDWARE_FAILED;
4961 4963
4962 if(!(fstatus & FCB_TX_STATUS_E)) 4964 if(!(fstatus & FCB_TX_STATUS_E))
4963 count++; 4965 count++;
@@ -4971,7 +4973,7 @@ static int smctr_send_rq_init(struct net_device *dev)
4971 smctr_update_tx_chain(dev, fcb, MAC_QUEUE); 4973 smctr_update_tx_chain(dev, fcb, MAC_QUEUE);
4972 } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS)); 4974 } while(count < 4 && ((fstatus & FCB_TX_AC_BITS) ^ FCB_TX_AC_BITS));
4973 4975
4974 return (smctr_join_complete_state(dev)); 4976 return smctr_join_complete_state(dev);
4975} 4977}
4976 4978
4977static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf, 4979static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
@@ -4984,13 +4986,13 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
4984 4986
4985 /* Check if this is the END POINT of the Transmit Forward Chain. */ 4987 /* Check if this is the END POINT of the Transmit Forward Chain. */
4986 if(rmf->vl <= 18) 4988 if(rmf->vl <= 18)
4987 return (0); 4989 return 0;
4988 4990
4989 /* Allocate Transmit FCB only by requesting 0 bytes 4991 /* Allocate Transmit FCB only by requesting 0 bytes
4990 * of data buffer. 4992 * of data buffer.
4991 */ 4993 */
4992 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L)) 4994 if((fcb = smctr_get_tx_fcb(dev, MAC_QUEUE, 0)) == (FCBlock *)(-1L))
4993 return (0); 4995 return 0;
4994 4996
4995 /* Set pointer to Transmit Frame Buffer to the data 4997 /* Set pointer to Transmit Frame Buffer to the data
4996 * portion of the received TX Forward frame, making 4998 * portion of the received TX Forward frame, making
@@ -5006,7 +5008,7 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
5006 fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2; 5008 fcb->bdb_ptr->buffer_length = rmf->vl - 4 - 2;
5007 5009
5008 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE))) 5010 if((err = smctr_trc_send_packet(dev, fcb, MAC_QUEUE)))
5009 return (err); 5011 return err;
5010 5012
5011 /* Wait for Transmit to Complete */ 5013 /* Wait for Transmit to Complete */
5012 for(i = 0; i < 10000; i++) 5014 for(i = 0; i < 10000; i++)
@@ -5020,7 +5022,7 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
5020 if(!(fcb->frame_status & FCB_COMMAND_DONE)) 5022 if(!(fcb->frame_status & FCB_COMMAND_DONE))
5021 { 5023 {
5022 if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE))) 5024 if((err = smctr_issue_resume_tx_fcb_cmd(dev, MAC_QUEUE)))
5023 return (err); 5025 return err;
5024 5026
5025 for(i = 0; i < 10000; i++) 5027 for(i = 0; i < 10000; i++)
5026 { 5028 {
@@ -5030,12 +5032,12 @@ static int smctr_send_tx_forward(struct net_device *dev, MAC_HEADER *rmf,
5030 } 5032 }
5031 5033
5032 if(!(fcb->frame_status & FCB_COMMAND_DONE)) 5034 if(!(fcb->frame_status & FCB_COMMAND_DONE))
5033 return (HARDWARE_FAILED); 5035 return HARDWARE_FAILED;
5034 } 5036 }
5035 5037
5036 *tx_fstatus = fcb->frame_status; 5038 *tx_fstatus = fcb->frame_status;
5037 5039
5038 return (A_FRAME_WAS_FORWARDED); 5040 return A_FRAME_WAS_FORWARDED;
5039} 5041}
5040 5042
5041static int smctr_set_auth_access_pri(struct net_device *dev, 5043static int smctr_set_auth_access_pri(struct net_device *dev,
@@ -5044,11 +5046,11 @@ static int smctr_set_auth_access_pri(struct net_device *dev,
5044 struct net_local *tp = netdev_priv(dev); 5046 struct net_local *tp = netdev_priv(dev);
5045 5047
5046 if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY) 5048 if(rsv->svl != S_AUTHORIZED_ACCESS_PRIORITY)
5047 return (E_SUB_VECTOR_LENGTH_ERROR); 5049 return E_SUB_VECTOR_LENGTH_ERROR;
5048 5050
5049 tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]); 5051 tp->authorized_access_priority = (rsv->svv[0] << 8 | rsv->svv[1]);
5050 5052
5051 return (POSITIVE_ACK); 5053 return POSITIVE_ACK;
5052} 5054}
5053 5055
5054static int smctr_set_auth_funct_class(struct net_device *dev, 5056static int smctr_set_auth_funct_class(struct net_device *dev,
@@ -5057,22 +5059,22 @@ static int smctr_set_auth_funct_class(struct net_device *dev,
5057 struct net_local *tp = netdev_priv(dev); 5059 struct net_local *tp = netdev_priv(dev);
5058 5060
5059 if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS) 5061 if(rsv->svl != S_AUTHORIZED_FUNCTION_CLASS)
5060 return (E_SUB_VECTOR_LENGTH_ERROR); 5062 return E_SUB_VECTOR_LENGTH_ERROR;
5061 5063
5062 tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]); 5064 tp->authorized_function_classes = (rsv->svv[0] << 8 | rsv->svv[1]);
5063 5065
5064 return (POSITIVE_ACK); 5066 return POSITIVE_ACK;
5065} 5067}
5066 5068
5067static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv, 5069static int smctr_set_corr(struct net_device *dev, MAC_SUB_VECTOR *rsv,
5068 __u16 *correlator) 5070 __u16 *correlator)
5069{ 5071{
5070 if(rsv->svl != S_CORRELATOR) 5072 if(rsv->svl != S_CORRELATOR)
5071 return (E_SUB_VECTOR_LENGTH_ERROR); 5073 return E_SUB_VECTOR_LENGTH_ERROR;
5072 5074
5073 *correlator = (rsv->svv[0] << 8 | rsv->svv[1]); 5075 *correlator = (rsv->svv[0] << 8 | rsv->svv[1]);
5074 5076
5075 return (POSITIVE_ACK); 5077 return POSITIVE_ACK;
5076} 5078}
5077 5079
5078static int smctr_set_error_timer_value(struct net_device *dev, 5080static int smctr_set_error_timer_value(struct net_device *dev,
@@ -5082,34 +5084,34 @@ static int smctr_set_error_timer_value(struct net_device *dev,
5082 int err; 5084 int err;
5083 5085
5084 if(rsv->svl != S_ERROR_TIMER_VALUE) 5086 if(rsv->svl != S_ERROR_TIMER_VALUE)
5085 return (E_SUB_VECTOR_LENGTH_ERROR); 5087 return E_SUB_VECTOR_LENGTH_ERROR;
5086 5088
5087 err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10; 5089 err_tval = (rsv->svv[0] << 8 | rsv->svv[1])*10;
5088 5090
5089 smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval); 5091 smctr_issue_write_word_cmd(dev, RW_TER_THRESHOLD, &err_tval);
5090 5092
5091 if((err = smctr_wait_cmd(dev))) 5093 if((err = smctr_wait_cmd(dev)))
5092 return (err); 5094 return err;
5093 5095
5094 return (POSITIVE_ACK); 5096 return POSITIVE_ACK;
5095} 5097}
5096 5098
5097static int smctr_set_frame_forward(struct net_device *dev, 5099static int smctr_set_frame_forward(struct net_device *dev,
5098 MAC_SUB_VECTOR *rsv, __u8 dc_sc) 5100 MAC_SUB_VECTOR *rsv, __u8 dc_sc)
5099{ 5101{
5100 if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD)) 5102 if((rsv->svl < 2) || (rsv->svl > S_FRAME_FORWARD))
5101 return (E_SUB_VECTOR_LENGTH_ERROR); 5103 return E_SUB_VECTOR_LENGTH_ERROR;
5102 5104
5103 if((dc_sc & DC_MASK) != DC_CRS) 5105 if((dc_sc & DC_MASK) != DC_CRS)
5104 { 5106 {
5105 if(rsv->svl >= 2 && rsv->svl < 20) 5107 if(rsv->svl >= 2 && rsv->svl < 20)
5106 return (E_TRANSMIT_FORWARD_INVALID); 5108 return E_TRANSMIT_FORWARD_INVALID;
5107 5109
5108 if((rsv->svv[0] != 0) || (rsv->svv[1] != 0)) 5110 if((rsv->svv[0] != 0) || (rsv->svv[1] != 0))
5109 return (E_TRANSMIT_FORWARD_INVALID); 5111 return E_TRANSMIT_FORWARD_INVALID;
5110 } 5112 }
5111 5113
5112 return (POSITIVE_ACK); 5114 return POSITIVE_ACK;
5113} 5115}
5114 5116
5115static int smctr_set_local_ring_num(struct net_device *dev, 5117static int smctr_set_local_ring_num(struct net_device *dev,
@@ -5118,13 +5120,13 @@ static int smctr_set_local_ring_num(struct net_device *dev,
5118 struct net_local *tp = netdev_priv(dev); 5120 struct net_local *tp = netdev_priv(dev);
5119 5121
5120 if(rsv->svl != S_LOCAL_RING_NUMBER) 5122 if(rsv->svl != S_LOCAL_RING_NUMBER)
5121 return (E_SUB_VECTOR_LENGTH_ERROR); 5123 return E_SUB_VECTOR_LENGTH_ERROR;
5122 5124
5123 if(tp->ptr_local_ring_num) 5125 if(tp->ptr_local_ring_num)
5124 *(__u16 *)(tp->ptr_local_ring_num) 5126 *(__u16 *)(tp->ptr_local_ring_num)
5125 = (rsv->svv[0] << 8 | rsv->svv[1]); 5127 = (rsv->svv[0] << 8 | rsv->svv[1]);
5126 5128
5127 return (POSITIVE_ACK); 5129 return POSITIVE_ACK;
5128} 5130}
5129 5131
5130static unsigned short smctr_set_ctrl_attention(struct net_device *dev) 5132static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
@@ -5140,7 +5142,7 @@ static unsigned short smctr_set_ctrl_attention(struct net_device *dev)
5140 outb(tp->trc_mask, ioaddr + CSR); 5142 outb(tp->trc_mask, ioaddr + CSR);
5141 } 5143 }
5142 5144
5143 return (0); 5145 return 0;
5144} 5146}
5145 5147
5146static void smctr_set_multicast_list(struct net_device *dev) 5148static void smctr_set_multicast_list(struct net_device *dev)
@@ -5159,7 +5161,7 @@ static int smctr_set_page(struct net_device *dev, __u8 *buf)
5159 amask = (__u8)((tptr & PR_PAGE_MASK) >> 8); 5161 amask = (__u8)((tptr & PR_PAGE_MASK) >> 8);
5160 outb(amask, dev->base_addr + PR); 5162 outb(amask, dev->base_addr + PR);
5161 5163
5162 return (0); 5164 return 0;
5163} 5165}
5164 5166
5165static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv) 5167static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
@@ -5167,13 +5169,13 @@ static int smctr_set_phy_drop(struct net_device *dev, MAC_SUB_VECTOR *rsv)
5167 int err; 5169 int err;
5168 5170
5169 if(rsv->svl != S_PHYSICAL_DROP) 5171 if(rsv->svl != S_PHYSICAL_DROP)
5170 return (E_SUB_VECTOR_LENGTH_ERROR); 5172 return E_SUB_VECTOR_LENGTH_ERROR;
5171 5173
5172 smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]); 5174 smctr_issue_write_byte_cmd(dev, RW_PHYSICAL_DROP_NUMBER, &rsv->svv[0]);
5173 if((err = smctr_wait_cmd(dev))) 5175 if((err = smctr_wait_cmd(dev)))
5174 return (err); 5176 return err;
5175 5177
5176 return (POSITIVE_ACK); 5178 return POSITIVE_ACK;
5177} 5179}
5178 5180
5179/* Reset the ring speed to the opposite of what it was. This auto-pilot 5181/* Reset the ring speed to the opposite of what it was. This auto-pilot
@@ -5195,16 +5197,16 @@ static int smctr_set_ring_speed(struct net_device *dev)
5195 smctr_reset_adapter(dev); 5197 smctr_reset_adapter(dev);
5196 5198
5197 if((err = smctr_init_card_real(dev))) 5199 if((err = smctr_init_card_real(dev)))
5198 return (err); 5200 return err;
5199 5201
5200 smctr_enable_bic_int(dev); 5202 smctr_enable_bic_int(dev);
5201 5203
5202 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK))) 5204 if((err = smctr_issue_enable_int_cmd(dev, TRC_INTERRUPT_ENABLE_MASK)))
5203 return (err); 5205 return err;
5204 5206
5205 smctr_disable_16bit(dev); 5207 smctr_disable_16bit(dev);
5206 5208
5207 return (0); 5209 return 0;
5208} 5210}
5209 5211
5210static int smctr_set_rx_look_ahead(struct net_device *dev) 5212static int smctr_set_rx_look_ahead(struct net_device *dev)
@@ -5233,7 +5235,7 @@ static int smctr_set_rx_look_ahead(struct net_device *dev)
5233 *((__u16 *)(tp->ram_access)) = sword; 5235 *((__u16 *)(tp->ram_access)) = sword;
5234 } 5236 }
5235 5237
5236 return (0); 5238 return 0;
5237} 5239}
5238 5240
5239static int smctr_set_trc_reset(int ioaddr) 5241static int smctr_set_trc_reset(int ioaddr)
@@ -5243,7 +5245,7 @@ static int smctr_set_trc_reset(int ioaddr)
5243 r = inb(ioaddr + MSR); 5245 r = inb(ioaddr + MSR);
5244 outb(MSR_RST | r, ioaddr + MSR); 5246 outb(MSR_RST | r, ioaddr + MSR);
5245 5247
5246 return (0); 5248 return 0;
5247} 5249}
5248 5250
5249/* 5251/*
@@ -5259,10 +5261,10 @@ static int smctr_setup_single_cmd(struct net_device *dev,
5259 printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name); 5261 printk(KERN_DEBUG "%s: smctr_setup_single_cmd\n", dev->name);
5260 5262
5261 if((err = smctr_wait_while_cbusy(dev))) 5263 if((err = smctr_wait_while_cbusy(dev)))
5262 return (err); 5264 return err;
5263 5265
5264 if((err = (unsigned int)smctr_wait_cmd(dev))) 5266 if((err = (unsigned int)smctr_wait_cmd(dev)))
5265 return (err); 5267 return err;
5266 5268
5267 tp->acb_head->cmd_done_status = 0; 5269 tp->acb_head->cmd_done_status = 0;
5268 tp->acb_head->cmd = command; 5270 tp->acb_head->cmd = command;
@@ -5270,7 +5272,7 @@ static int smctr_setup_single_cmd(struct net_device *dev,
5270 5272
5271 err = smctr_issue_resume_acb_cmd(dev); 5273 err = smctr_issue_resume_acb_cmd(dev);
5272 5274
5273 return (err); 5275 return err;
5274} 5276}
5275 5277
5276/* 5278/*
@@ -5287,7 +5289,7 @@ static int smctr_setup_single_cmd_w_data(struct net_device *dev,
5287 tp->acb_head->data_offset_lo 5289 tp->acb_head->data_offset_lo
5288 = (__u16)TRC_POINTER(tp->misc_command_data); 5290 = (__u16)TRC_POINTER(tp->misc_command_data);
5289 5291
5290 return(smctr_issue_resume_acb_cmd(dev)); 5292 return smctr_issue_resume_acb_cmd(dev);
5291} 5293}
5292 5294
5293static char *smctr_malloc(struct net_device *dev, __u16 size) 5295static char *smctr_malloc(struct net_device *dev, __u16 size)
@@ -5298,7 +5300,7 @@ static char *smctr_malloc(struct net_device *dev, __u16 size)
5298 m = (char *)(tp->ram_access + tp->sh_mem_used); 5300 m = (char *)(tp->ram_access + tp->sh_mem_used);
5299 tp->sh_mem_used += (__u32)size; 5301 tp->sh_mem_used += (__u32)size;
5300 5302
5301 return (m); 5303 return m;
5302} 5304}
5303 5305
5304static int smctr_status_chg(struct net_device *dev) 5306static int smctr_status_chg(struct net_device *dev)
@@ -5333,7 +5335,7 @@ static int smctr_status_chg(struct net_device *dev)
5333 break; 5335 break;
5334 } 5336 }
5335 5337
5336 return (0); 5338 return 0;
5337} 5339}
5338 5340
5339static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb, 5341static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
@@ -5355,7 +5357,7 @@ static int smctr_trc_send_packet(struct net_device *dev, FCBlock *fcb,
5355 err = smctr_issue_resume_tx_fcb_cmd(dev, queue); 5357 err = smctr_issue_resume_tx_fcb_cmd(dev, queue);
5356 } 5358 }
5357 5359
5358 return (err); 5360 return err;
5359} 5361}
5360 5362
5361static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue) 5363static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
@@ -5409,7 +5411,7 @@ static __u16 smctr_tx_complete(struct net_device *dev, __u16 queue)
5409 break; 5411 break;
5410 } 5412 }
5411 5413
5412 return (err); 5414 return err;
5413} 5415}
5414 5416
5415static unsigned short smctr_tx_move_frame(struct net_device *dev, 5417static unsigned short smctr_tx_move_frame(struct net_device *dev,
@@ -5450,7 +5452,7 @@ static unsigned short smctr_tx_move_frame(struct net_device *dev,
5450 pbuff += len; 5452 pbuff += len;
5451 } 5453 }
5452 5454
5453 return (0); 5455 return 0;
5454} 5456}
5455 5457
5456/* Update the error statistic counters for this adapter. */ 5458/* Update the error statistic counters for this adapter. */
@@ -5493,7 +5495,7 @@ static int smctr_update_err_stats(struct net_device *dev)
5493 if(tstat->token_errors) 5495 if(tstat->token_errors)
5494 tstat->token_errors += *(tp->misc_command_data + 5) >> 8; 5496 tstat->token_errors += *(tp->misc_command_data + 5) >> 8;
5495 5497
5496 return (0); 5498 return 0;
5497} 5499}
5498 5500
5499static int smctr_update_rx_chain(struct net_device *dev, __u16 queue) 5501static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
@@ -5530,7 +5532,7 @@ static int smctr_update_rx_chain(struct net_device *dev, __u16 queue)
5530 tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END; 5532 tp->rx_bdb_curr[queue]->back_ptr->info = BDB_NOT_CHAIN_END;
5531 tp->rx_bdb_curr[queue] = bdb; 5533 tp->rx_bdb_curr[queue] = bdb;
5532 5534
5533 return (0); 5535 return 0;
5534} 5536}
5535 5537
5536static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb, 5538static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
@@ -5542,13 +5544,13 @@ static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
5542 printk(KERN_DEBUG "smctr_update_tx_chain\n"); 5544 printk(KERN_DEBUG "smctr_update_tx_chain\n");
5543 5545
5544 if(tp->num_tx_fcbs_used[queue] <= 0) 5546 if(tp->num_tx_fcbs_used[queue] <= 0)
5545 return (HARDWARE_FAILED); 5547 return HARDWARE_FAILED;
5546 else 5548 else
5547 { 5549 {
5548 if(tp->tx_buff_used[queue] < fcb->memory_alloc) 5550 if(tp->tx_buff_used[queue] < fcb->memory_alloc)
5549 { 5551 {
5550 tp->tx_buff_used[queue] = 0; 5552 tp->tx_buff_used[queue] = 0;
5551 return (HARDWARE_FAILED); 5553 return HARDWARE_FAILED;
5552 } 5554 }
5553 5555
5554 tp->tx_buff_used[queue] -= fcb->memory_alloc; 5556 tp->tx_buff_used[queue] -= fcb->memory_alloc;
@@ -5566,7 +5568,7 @@ static int smctr_update_tx_chain(struct net_device *dev, FCBlock *fcb,
5566 fcb->frame_status = 0; 5568 fcb->frame_status = 0;
5567 tp->tx_fcb_end[queue] = fcb->next_ptr; 5569 tp->tx_fcb_end[queue] = fcb->next_ptr;
5568 netif_wake_queue(dev); 5570 netif_wake_queue(dev);
5569 return (0); 5571 return 0;
5570 } 5572 }
5571} 5573}
5572 5574
@@ -5587,12 +5589,12 @@ static int smctr_wait_cmd(struct net_device *dev)
5587 } 5589 }
5588 5590
5589 if(loop_count == 0) 5591 if(loop_count == 0)
5590 return(HARDWARE_FAILED); 5592 return HARDWARE_FAILED;
5591 5593
5592 if(tp->acb_head->cmd_done_status & 0xff) 5594 if(tp->acb_head->cmd_done_status & 0xff)
5593 return(HARDWARE_FAILED); 5595 return HARDWARE_FAILED;
5594 5596
5595 return (0); 5597 return 0;
5596} 5598}
5597 5599
5598static int smctr_wait_while_cbusy(struct net_device *dev) 5600static int smctr_wait_while_cbusy(struct net_device *dev)
@@ -5624,9 +5626,9 @@ static int smctr_wait_while_cbusy(struct net_device *dev)
5624 } 5626 }
5625 5627
5626 if(timeout) 5628 if(timeout)
5627 return (0); 5629 return 0;
5628 else 5630 else
5629 return (HARDWARE_FAILED); 5631 return HARDWARE_FAILED;
5630} 5632}
5631 5633
5632#ifdef MODULE 5634#ifdef MODULE
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 08182fde3dcd..c83f4f6e39e1 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -224,7 +224,7 @@ static int madgemc_sifprobe(struct net_device *dev)
224 chk2 ^= 0x0FE; 224 chk2 ^= 0x0FE;
225 225
226 if(chk1 != chk2) 226 if(chk1 != chk2)
227 return (-1); /* No adapter */ 227 return -1; /* No adapter */
228 chk1 -= 2; 228 chk1 -= 2;
229 } while(chk1 != 0); /* Repeat 128 times (all byte values) */ 229 } while(chk1 != 0); /* Repeat 128 times (all byte values) */
230 230
@@ -232,7 +232,7 @@ static int madgemc_sifprobe(struct net_device *dev)
232 /* Restore the SIFADR value */ 232 /* Restore the SIFADR value */
233 SIFWRITEB(old, SIFADR); 233 SIFWRITEB(old, SIFADR);
234 234
235 return (0); 235 return 0;
236} 236}
237#endif 237#endif
238 238
@@ -271,7 +271,7 @@ int tms380tr_open(struct net_device *dev)
271 { 271 {
272 printk(KERN_INFO "%s: Chipset initialization error\n", 272 printk(KERN_INFO "%s: Chipset initialization error\n",
273 dev->name); 273 dev->name);
274 return (-1); 274 return -1;
275 } 275 }
276 276
277 tp->timer.expires = jiffies + 30*HZ; 277 tp->timer.expires = jiffies + 30*HZ;
@@ -298,7 +298,7 @@ int tms380tr_open(struct net_device *dev)
298 if(tp->AdapterVirtOpenFlag == 0) 298 if(tp->AdapterVirtOpenFlag == 0)
299 { 299 {
300 tms380tr_disable_interrupts(dev); 300 tms380tr_disable_interrupts(dev);
301 return (-1); 301 return -1;
302 } 302 }
303 303
304 tp->StartTime = jiffies; 304 tp->StartTime = jiffies;
@@ -309,7 +309,7 @@ int tms380tr_open(struct net_device *dev)
309 tp->timer.data = (unsigned long)dev; 309 tp->timer.data = (unsigned long)dev;
310 add_timer(&tp->timer); 310 add_timer(&tp->timer);
311 311
312 return (0); 312 return 0;
313} 313}
314 314
315/* 315/*
@@ -343,23 +343,23 @@ static int tms380tr_chipset_init(struct net_device *dev)
343 printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name); 343 printk(KERN_DEBUG "%s: Resetting adapter...\n", dev->name);
344 err = tms380tr_reset_adapter(dev); 344 err = tms380tr_reset_adapter(dev);
345 if(err < 0) 345 if(err < 0)
346 return (-1); 346 return -1;
347 347
348 if(tms380tr_debug > 3) 348 if(tms380tr_debug > 3)
349 printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name); 349 printk(KERN_DEBUG "%s: Bringup diags...\n", dev->name);
350 err = tms380tr_bringup_diags(dev); 350 err = tms380tr_bringup_diags(dev);
351 if(err < 0) 351 if(err < 0)
352 return (-1); 352 return -1;
353 353
354 if(tms380tr_debug > 3) 354 if(tms380tr_debug > 3)
355 printk(KERN_DEBUG "%s: Init adapter...\n", dev->name); 355 printk(KERN_DEBUG "%s: Init adapter...\n", dev->name);
356 err = tms380tr_init_adapter(dev); 356 err = tms380tr_init_adapter(dev);
357 if(err < 0) 357 if(err < 0)
358 return (-1); 358 return -1;
359 359
360 if(tms380tr_debug > 3) 360 if(tms380tr_debug > 3)
361 printk(KERN_DEBUG "%s: Done!\n", dev->name); 361 printk(KERN_DEBUG "%s: Done!\n", dev->name);
362 return (0); 362 return 0;
363} 363}
364 364
365/* 365/*
@@ -877,7 +877,7 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
877 IrqType != STS_IRQ_COMMAND_STATUS && 877 IrqType != STS_IRQ_COMMAND_STATUS &&
878 IrqType != STS_IRQ_RING_STATUS) 878 IrqType != STS_IRQ_RING_STATUS)
879 { 879 {
880 return (1); /* SSB not involved. */ 880 return 1; /* SSB not involved. */
881 } 881 }
882 882
883 /* Note: All fields of the SSB have been set to all ones (-1) after it 883 /* Note: All fields of the SSB have been set to all ones (-1) after it
@@ -887,21 +887,21 @@ static unsigned char tms380tr_chk_ssb(struct net_local *tp, unsigned short IrqTy
887 */ 887 */
888 888
889 if(ssb->STS == (unsigned short) -1) 889 if(ssb->STS == (unsigned short) -1)
890 return (0); /* Command field not yet available. */ 890 return 0; /* Command field not yet available. */
891 if(IrqType == STS_IRQ_COMMAND_STATUS) 891 if(IrqType == STS_IRQ_COMMAND_STATUS)
892 return (1); /* Status fields not always affected. */ 892 return 1; /* Status fields not always affected. */
893 if(ssb->Parm[0] == (unsigned short) -1) 893 if(ssb->Parm[0] == (unsigned short) -1)
894 return (0); /* Status 1 field not yet available. */ 894 return 0; /* Status 1 field not yet available. */
895 if(IrqType == STS_IRQ_RING_STATUS) 895 if(IrqType == STS_IRQ_RING_STATUS)
896 return (1); /* Status 2 & 3 fields not affected. */ 896 return 1; /* Status 2 & 3 fields not affected. */
897 897
898 /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */ 898 /* Note: At this point, the interrupt is either TRANSMIT or RECEIVE. */
899 if(ssb->Parm[1] == (unsigned short) -1) 899 if(ssb->Parm[1] == (unsigned short) -1)
900 return (0); /* Status 2 field not yet available. */ 900 return 0; /* Status 2 field not yet available. */
901 if(ssb->Parm[2] == (unsigned short) -1) 901 if(ssb->Parm[2] == (unsigned short) -1)
902 return (0); /* Status 3 field not yet available. */ 902 return 0; /* Status 3 field not yet available. */
903 903
904 return (1); /* All SSB fields have been written by the adapter. */ 904 return 1; /* All SSB fields have been written by the adapter. */
905} 905}
906 906
907/* 907/*
@@ -1143,7 +1143,7 @@ int tms380tr_close(struct net_device *dev)
1143#endif 1143#endif
1144 tms380tr_cancel_tx_queue(tp); 1144 tms380tr_cancel_tx_queue(tp);
1145 1145
1146 return (0); 1146 return 0;
1147} 1147}
1148 1148
1149/* 1149/*
@@ -1154,7 +1154,7 @@ static struct net_device_stats *tms380tr_get_stats(struct net_device *dev)
1154{ 1154{
1155 struct net_local *tp = netdev_priv(dev); 1155 struct net_local *tp = netdev_priv(dev);
1156 1156
1157 return ((struct net_device_stats *)&tp->MacStat); 1157 return (struct net_device_stats *)&tp->MacStat;
1158} 1158}
1159 1159
1160/* 1160/*
@@ -1256,7 +1256,7 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1256 if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) { 1256 if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
1257 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n", 1257 printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
1258 dev->name, "tms380tr.bin"); 1258 dev->name, "tms380tr.bin");
1259 return (-1); 1259 return -1;
1260 } 1260 }
1261 1261
1262 fw_ptr = (unsigned short *)fw_entry->data; 1262 fw_ptr = (unsigned short *)fw_entry->data;
@@ -1322,13 +1322,13 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1322 /* Clear CPHALT and start BUD */ 1322 /* Clear CPHALT and start BUD */
1323 SIFWRITEW(c, SIFACL); 1323 SIFWRITEW(c, SIFACL);
1324 release_firmware(fw_entry); 1324 release_firmware(fw_entry);
1325 return (1); 1325 return 1;
1326 } 1326 }
1327 } while(count == 0); 1327 } while(count == 0);
1328 1328
1329 release_firmware(fw_entry); 1329 release_firmware(fw_entry);
1330 printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name); 1330 printk(KERN_INFO "%s: Adapter Download Failed\n", dev->name);
1331 return (-1); 1331 return -1;
1332} 1332}
1333 1333
1334MODULE_FIRMWARE("tms380tr.bin"); 1334MODULE_FIRMWARE("tms380tr.bin");
@@ -1363,7 +1363,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
1363 printk(KERN_DEBUG " %04X\n", Status); 1363 printk(KERN_DEBUG " %04X\n", Status);
1364 /* BUD successfully completed */ 1364 /* BUD successfully completed */
1365 if(Status == STS_INITIALIZE) 1365 if(Status == STS_INITIALIZE)
1366 return (1); 1366 return 1;
1367 /* Unrecoverable hardware error, BUD not completed? */ 1367 /* Unrecoverable hardware error, BUD not completed? */
1368 } while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST)) 1368 } while((loop_cnt > 0) && ((Status & (STS_ERROR | STS_TEST))
1369 != (STS_ERROR | STS_TEST))); 1369 != (STS_ERROR | STS_TEST)));
@@ -1390,7 +1390,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
1390 else 1390 else
1391 printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f); 1391 printk(KERN_INFO "%s: Bring Up Diagnostics Error (%04X) occurred\n", dev->name, Status & 0x000f);
1392 1392
1393 return (-1); 1393 return -1;
1394} 1394}
1395 1395
1396/* 1396/*
@@ -1464,7 +1464,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
1464 { 1464 {
1465 printk(KERN_INFO "%s: DMA failed\n", dev->name); 1465 printk(KERN_INFO "%s: DMA failed\n", dev->name);
1466 /* DMA data error: wrong data in SCB */ 1466 /* DMA data error: wrong data in SCB */
1467 return (-1); 1467 return -1;
1468 } 1468 }
1469 i++; 1469 i++;
1470 } while(i < 6); 1470 } while(i < 6);
@@ -1473,11 +1473,11 @@ static int tms380tr_init_adapter(struct net_device *dev)
1473 do { /* Test if contents of SSB is valid */ 1473 do { /* Test if contents of SSB is valid */
1474 if(SSB_Test[i] != *(sb_ptr + i)) 1474 if(SSB_Test[i] != *(sb_ptr + i))
1475 /* DMA data error: wrong data in SSB */ 1475 /* DMA data error: wrong data in SSB */
1476 return (-1); 1476 return -1;
1477 i++; 1477 i++;
1478 } while (i < 8); 1478 } while (i < 8);
1479 1479
1480 return (1); /* Adapter successfully initialized */ 1480 return 1; /* Adapter successfully initialized */
1481 } 1481 }
1482 else 1482 else
1483 { 1483 {
@@ -1488,7 +1488,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
1488 Status &= STS_ERROR_MASK; 1488 Status &= STS_ERROR_MASK;
1489 /* ShowInitialisationErrorCode(Status); */ 1489 /* ShowInitialisationErrorCode(Status); */
1490 printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status); 1490 printk(KERN_INFO "%s: Status error: %d\n", dev->name, Status);
1491 return (-1); /* Unrecoverable error */ 1491 return -1; /* Unrecoverable error */
1492 } 1492 }
1493 else 1493 else
1494 { 1494 {
@@ -1503,7 +1503,7 @@ static int tms380tr_init_adapter(struct net_device *dev)
1503 } while(retry_cnt > 0); 1503 } while(retry_cnt > 0);
1504 1504
1505 printk(KERN_INFO "%s: Retry exceeded\n", dev->name); 1505 printk(KERN_INFO "%s: Retry exceeded\n", dev->name);
1506 return (-1); 1506 return -1;
1507} 1507}
1508 1508
1509/* 1509/*
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index d4c7c0c0a3d6..d3e788a9cd1c 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -125,18 +125,16 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
125 dev->irq = pci_irq_line; 125 dev->irq = pci_irq_line;
126 dev->dma = 0; 126 dev->dma = 0;
127 127
128 printk("%s: %s\n", dev->name, cardinfo->name); 128 dev_info(&pdev->dev, "%s\n", cardinfo->name);
129 printk("%s: IO: %#4lx IRQ: %d\n", 129 dev_info(&pdev->dev, " IO: %#4lx IRQ: %d\n", dev->base_addr, dev->irq);
130 dev->name, dev->base_addr, dev->irq);
131 130
132 tms_pci_read_eeprom(dev); 131 tms_pci_read_eeprom(dev);
133 132
134 printk("%s: Ring Station Address: %pM\n", 133 dev_info(&pdev->dev, " Ring Station Address: %pM\n", dev->dev_addr);
135 dev->name, dev->dev_addr);
136 134
137 ret = tmsdev_init(dev, &pdev->dev); 135 ret = tmsdev_init(dev, &pdev->dev);
138 if (ret) { 136 if (ret) {
139 printk("%s: unable to get memory for dev->priv.\n", dev->name); 137 dev_info(&pdev->dev, "unable to get memory for dev->priv.\n");
140 goto err_out_region; 138 goto err_out_region;
141 } 139 }
142 140
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a03730bd1da5..5c633a32eaeb 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -219,7 +219,7 @@ static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
219 if (i == 100) 219 if (i == 100)
220 return 0xffff; 220 return 0xffff;
221 else 221 else
222 return (TSI_READ_PHY(TSI108_MAC_MII_DATAIN)); 222 return TSI_READ_PHY(TSI108_MAC_MII_DATAIN);
223} 223}
224 224
225static void tsi108_write_mii(struct tsi108_prv_data *data, 225static void tsi108_write_mii(struct tsi108_prv_data *data,
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 516713fa0a05..f3035951422f 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -11,8 +11,8 @@ menuconfig NET_TULIP
11if NET_TULIP 11if NET_TULIP
12 12
13config DE2104X 13config DE2104X
14 tristate "Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)" 14 tristate "Early DECchip Tulip (dc2104x) PCI support"
15 depends on PCI && EXPERIMENTAL 15 depends on PCI
16 select CRC32 16 select CRC32
17 ---help--- 17 ---help---
18 This driver is developed for the SMC EtherPower series Ethernet 18 This driver is developed for the SMC EtherPower series Ethernet
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa57757a2c..28e1ffb13db9 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@ enum {
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12), 244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9), 245 NonselPortActive = (1 << 9),
246 SelPortActive = (1 << 8),
246 LinkFailStatus = (1 << 2), 247 LinkFailStatus = (1 << 2),
247 NetCxnErr = (1 << 1), 248 NetCxnErr = (1 << 1),
248}; 249};
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363 364
364/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 365/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 366static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; 367static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
368/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
369static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 370static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
368 371
369 372
@@ -945,8 +948,9 @@ static void de_set_media (struct de_private *de)
945 else 948 else
946 macmode &= ~FullDuplex; 949 macmode &= ~FullDuplex;
947 950
948 if (netif_msg_link(de)) { 951 if (netif_msg_link(de))
949 dev_info(&de->dev->dev, "set link %s\n", media_name[media]); 952 dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
953 if (netif_msg_hw(de)) {
950 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n", 954 dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
951 dr32(MacMode), dr32(SIAStatus), 955 dr32(MacMode), dr32(SIAStatus),
952 dr32(CSR13), dr32(CSR14), dr32(CSR15)); 956 dr32(CSR13), dr32(CSR14), dr32(CSR15));
@@ -1064,6 +1068,9 @@ static void de21041_media_timer (unsigned long data)
1064 unsigned int carrier; 1068 unsigned int carrier;
1065 unsigned long flags; 1069 unsigned long flags;
1066 1070
1071 /* clear port active bits */
1072 dw32(SIAStatus, NonselPortActive | SelPortActive);
1073
1067 carrier = (status & NetCxnErr) ? 0 : 1; 1074 carrier = (status & NetCxnErr) ? 0 : 1;
1068 1075
1069 if (carrier) { 1076 if (carrier) {
@@ -1158,14 +1165,29 @@ no_link_yet:
1158static void de_media_interrupt (struct de_private *de, u32 status) 1165static void de_media_interrupt (struct de_private *de, u32 status)
1159{ 1166{
1160 if (status & LinkPass) { 1167 if (status & LinkPass) {
1168 /* Ignore if current media is AUI or BNC and we can't use TP */
1169 if ((de->media_type == DE_MEDIA_AUI ||
1170 de->media_type == DE_MEDIA_BNC) &&
1171 (de->media_lock ||
1172 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1173 return;
1174 /* If current media is not TP, change it to TP */
1175 if ((de->media_type == DE_MEDIA_AUI ||
1176 de->media_type == DE_MEDIA_BNC)) {
1177 de->media_type = DE_MEDIA_TP_AUTO;
1178 de_stop_rxtx(de);
1179 de_set_media(de);
1180 de_start_rxtx(de);
1181 }
1161 de_link_up(de); 1182 de_link_up(de);
1162 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1183 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1163 return; 1184 return;
1164 } 1185 }
1165 1186
1166 BUG_ON(!(status & LinkFail)); 1187 BUG_ON(!(status & LinkFail));
1167 1188 /* Mark the link as down only if current media is TP */
1168 if (netif_carrier_ok(de->dev)) { 1189 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1190 de->media_type != DE_MEDIA_BNC) {
1169 de_link_down(de); 1191 de_link_down(de);
1170 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1192 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1171 } 1193 }
@@ -1229,6 +1251,7 @@ static void de_adapter_sleep (struct de_private *de)
1229 if (de->de21040) 1251 if (de->de21040)
1230 return; 1252 return;
1231 1253
1254 dw32(CSR13, 0); /* Reset phy */
1232 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1255 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1233 pmctl |= PM_Sleep; 1256 pmctl |= PM_Sleep;
1234 pci_write_config_dword(de->pdev, PCIPM, pmctl); 1257 pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1597,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1574 return 0; /* nothing to change */ 1597 return 0; /* nothing to change */
1575 1598
1576 de_link_down(de); 1599 de_link_down(de);
1600 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1577 de_stop_rxtx(de); 1601 de_stop_rxtx(de);
1578 1602
1579 de->media_type = new_media; 1603 de->media_type = new_media;
1580 de->media_lock = media_lock; 1604 de->media_lock = media_lock;
1581 de->media_advertise = ecmd->advertising; 1605 de->media_advertise = ecmd->advertising;
1582 de_set_media(de); 1606 de_set_media(de);
1607 if (netif_running(de->dev))
1608 de_start_rxtx(de);
1583 1609
1584 return 0; 1610 return 0;
1585} 1611}
@@ -1911,8 +1937,14 @@ fill_defaults:
1911 for (i = 0; i < DE_MAX_MEDIA; i++) { 1937 for (i = 0; i < DE_MAX_MEDIA; i++) {
1912 if (de->media[i].csr13 == 0xffff) 1938 if (de->media[i].csr13 == 0xffff)
1913 de->media[i].csr13 = t21041_csr13[i]; 1939 de->media[i].csr13 = t21041_csr13[i];
1914 if (de->media[i].csr14 == 0xffff) 1940 if (de->media[i].csr14 == 0xffff) {
1915 de->media[i].csr14 = t21041_csr14[i]; 1941 /* autonegotiation is broken at least on some chip
1942 revisions - rev. 0x21 works, 0x11 does not */
1943 if (de->pdev->revision < 0x20)
1944 de->media[i].csr14 = t21041_csr14_brk[i];
1945 else
1946 de->media[i].csr14 = t21041_csr14[i];
1947 }
1916 if (de->media[i].csr15 == 0xffff) 1948 if (de->media[i].csr15 == 0xffff)
1917 de->media[i].csr15 = t21041_csr15[i]; 1949 de->media[i].csr15 = t21041_csr15[i];
1918 } 1950 }
@@ -2158,6 +2190,8 @@ static int de_resume (struct pci_dev *pdev)
2158 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2190 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2159 goto out; 2191 goto out;
2160 } 2192 }
2193 pci_set_master(pdev);
2194 de_init_rings(de);
2161 de_init_hw(de); 2195 de_init_hw(de);
2162out_attach: 2196out_attach:
2163 netif_device_attach(dev); 2197 netif_device_attach(dev);
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 4f7503063446..4dbd493b996b 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -3119,7 +3119,7 @@ dc2114x_autoconf(struct net_device *dev)
3119 if (lp->media == _100Mb) { 3119 if (lp->media == _100Mb) {
3120 if ((slnk = test_for_100Mb(dev, 6500)) < 0) { 3120 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3121 lp->media = SPD_DET; 3121 lp->media = SPD_DET;
3122 return (slnk & ~TIMER_CB); 3122 return slnk & ~TIMER_CB;
3123 } 3123 }
3124 } else { 3124 } else {
3125 if (wait_for_link(dev) < 0) { 3125 if (wait_for_link(dev) < 0) {
@@ -3484,7 +3484,7 @@ is_spd_100(struct net_device *dev)
3484 spd = ((~gep_rd(dev)) & GEP_SLNK); 3484 spd = ((~gep_rd(dev)) & GEP_SLNK);
3485 } else { 3485 } else {
3486 if ((lp->ibn == 2) || !lp->asBitValid) 3486 if ((lp->ibn == 2) || !lp->asBitValid)
3487 return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0); 3487 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3488 3488
3489 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | 3489 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3490 (lp->linkOK & ~lp->asBitValid); 3490 (lp->linkOK & ~lp->asBitValid);
@@ -3502,15 +3502,15 @@ is_100_up(struct net_device *dev)
3502 if (lp->useMII) { 3502 if (lp->useMII) {
3503 /* Double read for sticky bits & temporary drops */ 3503 /* Double read for sticky bits & temporary drops */
3504 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); 3504 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3505 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS); 3505 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3506 } else if (!lp->useSROM) { /* de500-xa */ 3506 } else if (!lp->useSROM) { /* de500-xa */
3507 return ((~gep_rd(dev)) & GEP_SLNK); 3507 return (~gep_rd(dev)) & GEP_SLNK;
3508 } else { 3508 } else {
3509 if ((lp->ibn == 2) || !lp->asBitValid) 3509 if ((lp->ibn == 2) || !lp->asBitValid)
3510 return ((lp->chipset == DC21143)?(~inl(DE4X5_SISR)&SISR_LS100):0); 3510 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3511 3511
3512 return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | 3512 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3513 (lp->linkOK & ~lp->asBitValid)); 3513 (lp->linkOK & ~lp->asBitValid);
3514 } 3514 }
3515} 3515}
3516 3516
@@ -3523,17 +3523,17 @@ is_10_up(struct net_device *dev)
3523 if (lp->useMII) { 3523 if (lp->useMII) {
3524 /* Double read for sticky bits & temporary drops */ 3524 /* Double read for sticky bits & temporary drops */
3525 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); 3525 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3526 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS); 3526 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3527 } else if (!lp->useSROM) { /* de500-xa */ 3527 } else if (!lp->useSROM) { /* de500-xa */
3528 return ((~gep_rd(dev)) & GEP_LNP); 3528 return (~gep_rd(dev)) & GEP_LNP;
3529 } else { 3529 } else {
3530 if ((lp->ibn == 2) || !lp->asBitValid) 3530 if ((lp->ibn == 2) || !lp->asBitValid)
3531 return (((lp->chipset & ~0x00ff) == DC2114x) ? 3531 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3532 (~inl(DE4X5_SISR)&SISR_LS10): 3532 (~inl(DE4X5_SISR)&SISR_LS10):
3533 0); 3533 0;
3534 3534
3535 return ((lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | 3535 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3536 (lp->linkOK & ~lp->asBitValid)); 3536 (lp->linkOK & ~lp->asBitValid);
3537 } 3537 }
3538} 3538}
3539 3539
@@ -3544,7 +3544,7 @@ is_anc_capable(struct net_device *dev)
3544 u_long iobase = dev->base_addr; 3544 u_long iobase = dev->base_addr;
3545 3545
3546 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { 3546 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3547 return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII)); 3547 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3548 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 3548 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3549 return (inl(DE4X5_SISR) & SISR_LPN) >> 12; 3549 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3550 } else { 3550 } else {
@@ -4930,7 +4930,7 @@ getfrom_mii(u32 command, u_long ioaddr)
4930 outl(command | MII_MDC, ioaddr); 4930 outl(command | MII_MDC, ioaddr);
4931 udelay(1); 4931 udelay(1);
4932 4932
4933 return ((inl(ioaddr) >> 19) & 1); 4933 return (inl(ioaddr) >> 19) & 1;
4934} 4934}
4935 4935
4936/* 4936/*
@@ -4975,8 +4975,8 @@ mii_get_oui(u_char phyaddr, u_long ioaddr)
4975 a.breg[0]=a.breg[1]; 4975 a.breg[0]=a.breg[1];
4976 a.breg[1]=i; 4976 a.breg[1]=i;
4977 4977
4978 return ((a.reg<<8)|ret); */ /* SEEQ and Cypress way */ 4978 return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
4979/* return ((r2<<6)|(u_int)(r3>>10)); */ /* NATIONAL and BROADCOM way */ 4979/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
4980 return r2; /* (I did it) My way */ 4980 return r2; /* (I did it) My way */
4981} 4981}
4982 4982
@@ -5144,7 +5144,7 @@ gep_rd(struct net_device *dev)
5144 if (lp->chipset == DC21140) { 5144 if (lp->chipset == DC21140) {
5145 return inl(DE4X5_GEP); 5145 return inl(DE4X5_GEP);
5146 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 5146 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5147 return (inl(DE4X5_SIGR) & 0x000fffff); 5147 return inl(DE4X5_SIGR) & 0x000fffff;
5148 } 5148 }
5149 5149
5150 return 0; 5150 return 0;
@@ -5474,7 +5474,8 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5474 tmp.lval[6] = inl(DE4X5_STRR); j+=4; 5474 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
5475 tmp.lval[7] = inl(DE4X5_SIGR); j+=4; 5475 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
5476 ioc->len = j; 5476 ioc->len = j;
5477 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; 5477 if (copy_to_user(ioc->data, tmp.lval, ioc->len))
5478 return -EFAULT;
5478 break; 5479 break;
5479 5480
5480#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ 5481#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 1dc27a557275..74217dbf0143 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1747,7 +1747,7 @@ static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1747 if(cr10_value&0x10000000) 1747 if(cr10_value&0x10000000)
1748 break; 1748 break;
1749 } 1749 }
1750 return (cr10_value&0x0ffff); 1750 return cr10_value & 0x0ffff;
1751} 1751}
1752 1752
1753static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) 1753static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5dfb39539b3e..1cc67138adbf 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -541,7 +541,7 @@ cleanup:
541 541
542 indexes->respCleared = cpu_to_le32(cleared); 542 indexes->respCleared = cpu_to_le32(cleared);
543 wmb(); 543 wmb();
544 return (resp_save == NULL); 544 return resp_save == NULL;
545} 545}
546 546
547static inline int 547static inline int
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 4f123f869bdc..8110595fbbcc 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1643,6 +1643,8 @@ static int hso_get_count(struct hso_serial *serial,
1643 struct uart_icount cnow; 1643 struct uart_icount cnow;
1644 struct hso_tiocmget *tiocmget = serial->tiocmget; 1644 struct hso_tiocmget *tiocmget = serial->tiocmget;
1645 1645
1646 memset(&icount, 0, sizeof(struct serial_icounter_struct));
1647
1646 if (!tiocmget) 1648 if (!tiocmget)
1647 return -ENOENT; 1649 return -ENOENT;
1648 spin_lock_irq(&serial->serial_lock); 1650 spin_lock_irq(&serial->serial_lock);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ee85c8b9a858..d1ac15c95faf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -203,7 +203,7 @@ static inline void sierra_net_set_private(struct usbnet *dev,
203/* is packet IPv4 */ 203/* is packet IPv4 */
204static inline int is_ip(struct sk_buff *skb) 204static inline int is_ip(struct sk_buff *skb)
205{ 205{
206 return (skb->protocol == cpu_to_be16(ETH_P_IP)); 206 return skb->protocol == cpu_to_be16(ETH_P_IP);
207} 207}
208 208
209/* 209/*
@@ -354,7 +354,7 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
354 354
355static inline int sierra_net_is_valid_addrlen(u8 len) 355static inline int sierra_net_is_valid_addrlen(u8 len)
356{ 356{
357 return (len == sizeof(struct in_addr)); 357 return len == sizeof(struct in_addr);
358} 358}
359 359
360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) 360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 12a3c88c5282..65cb1abfbe57 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -805,8 +805,6 @@ static int smsc95xx_reset(struct usbnet *dev)
805 return ret; 805 return ret;
806 } 806 }
807 807
808 smsc95xx_init_mac_address(dev);
809
810 ret = smsc95xx_set_mac_address(dev); 808 ret = smsc95xx_set_mac_address(dev);
811 if (ret < 0) 809 if (ret < 0)
812 return ret; 810 return ret;
@@ -1047,6 +1045,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1047 pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE; 1045 pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE;
1048 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1046 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
1049 1047
1048 smsc95xx_init_mac_address(dev);
1049
1050 /* Init all registers */ 1050 /* Init all registers */
1051 ret = smsc95xx_reset(dev); 1051 ret = smsc95xx_reset(dev);
1052 1052
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5ec542dd5b50..0bbc0c323135 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -250,7 +250,7 @@ static int veth_close(struct net_device *dev)
250 250
251static int is_valid_veth_mtu(int new_mtu) 251static int is_valid_veth_mtu(int new_mtu)
252{ 252{
253 return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU); 253 return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
254} 254}
255 255
256static int veth_change_mtu(struct net_device *dev, int new_mtu) 256static int veth_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 6884813b809c..cab96ad49e60 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -312,13 +312,14 @@ VELOCITY_PARAM(flow_control, "Enable flow control ability");
312 312
313#define MED_LNK_DEF 0 313#define MED_LNK_DEF 0
314#define MED_LNK_MIN 0 314#define MED_LNK_MIN 0
315#define MED_LNK_MAX 4 315#define MED_LNK_MAX 5
316/* speed_duplex[] is used for setting the speed and duplex mode of NIC. 316/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
317 0: indicate autonegotiation for both speed and duplex mode 317 0: indicate autonegotiation for both speed and duplex mode
318 1: indicate 100Mbps half duplex mode 318 1: indicate 100Mbps half duplex mode
319 2: indicate 100Mbps full duplex mode 319 2: indicate 100Mbps full duplex mode
320 3: indicate 10Mbps half duplex mode 320 3: indicate 10Mbps half duplex mode
321 4: indicate 10Mbps full duplex mode 321 4: indicate 10Mbps full duplex mode
322 5: indicate 1000Mbps full duplex mode
322 323
323 Note: 324 Note:
324 if EEPROM have been set to the force mode, this option is ignored 325 if EEPROM have been set to the force mode, this option is ignored
@@ -617,6 +618,9 @@ static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
617 case SPD_DPX_10_HALF: 618 case SPD_DPX_10_HALF:
618 status = VELOCITY_SPEED_10; 619 status = VELOCITY_SPEED_10;
619 break; 620 break;
621 case SPD_DPX_1000_FULL:
622 status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
623 break;
620 } 624 }
621 vptr->mii_status = status; 625 vptr->mii_status = status;
622 return status; 626 return status;
@@ -922,6 +926,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
922 /* enable AUTO-NEGO mode */ 926 /* enable AUTO-NEGO mode */
923 mii_set_auto_on(vptr); 927 mii_set_auto_on(vptr);
924 } else { 928 } else {
929 u16 CTRL1000;
925 u16 ANAR; 930 u16 ANAR;
926 u8 CHIPGCR; 931 u8 CHIPGCR;
927 932
@@ -936,7 +941,11 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
936 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 941 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
937 942
938 CHIPGCR = readb(&regs->CHIPGCR); 943 CHIPGCR = readb(&regs->CHIPGCR);
939 CHIPGCR &= ~CHIPGCR_FCGMII; 944
945 if (mii_status & VELOCITY_SPEED_1000)
946 CHIPGCR |= CHIPGCR_FCGMII;
947 else
948 CHIPGCR &= ~CHIPGCR_FCGMII;
940 949
941 if (mii_status & VELOCITY_DUPLEX_FULL) { 950 if (mii_status & VELOCITY_DUPLEX_FULL) {
942 CHIPGCR |= CHIPGCR_FCFDX; 951 CHIPGCR |= CHIPGCR_FCFDX;
@@ -952,7 +961,13 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
952 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); 961 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
953 } 962 }
954 963
955 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs); 964 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
965 CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
966 if ((mii_status & VELOCITY_SPEED_1000) &&
967 (mii_status & VELOCITY_DUPLEX_FULL)) {
968 CTRL1000 |= ADVERTISE_1000FULL;
969 }
970 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
956 971
957 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) 972 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
958 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); 973 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
@@ -967,7 +982,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
967 ANAR |= ADVERTISE_100FULL; 982 ANAR |= ADVERTISE_100FULL;
968 else 983 else
969 ANAR |= ADVERTISE_100HALF; 984 ANAR |= ADVERTISE_100HALF;
970 } else { 985 } else if (mii_status & VELOCITY_SPEED_10) {
971 if (mii_status & VELOCITY_DUPLEX_FULL) 986 if (mii_status & VELOCITY_DUPLEX_FULL)
972 ANAR |= ADVERTISE_10FULL; 987 ANAR |= ADVERTISE_10FULL;
973 else 988 else
@@ -1013,6 +1028,9 @@ static void velocity_print_link_status(struct velocity_info *vptr)
1013 } else { 1028 } else {
1014 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); 1029 VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1015 switch (vptr->options.spd_dpx) { 1030 switch (vptr->options.spd_dpx) {
1031 case SPD_DPX_1000_FULL:
1032 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n");
1033 break;
1016 case SPD_DPX_100_HALF: 1034 case SPD_DPX_100_HALF:
1017 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); 1035 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1018 break; 1036 break;
@@ -2574,7 +2592,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2574 2592
2575 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; 2593 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2576 2594
2577 if (vptr->vlgrp && vlan_tx_tag_present(skb)) { 2595 if (vlan_tx_tag_present(skb)) {
2578 td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2596 td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2579 td_ptr->tdesc1.TCR |= TCR0_VETAG; 2597 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2580 } 2598 }
@@ -3170,6 +3188,37 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
3170 SUPPORTED_100baseT_Full | 3188 SUPPORTED_100baseT_Full |
3171 SUPPORTED_1000baseT_Half | 3189 SUPPORTED_1000baseT_Half |
3172 SUPPORTED_1000baseT_Full; 3190 SUPPORTED_1000baseT_Full;
3191
3192 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3193 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3194 cmd->advertising |=
3195 ADVERTISED_10baseT_Half |
3196 ADVERTISED_10baseT_Full |
3197 ADVERTISED_100baseT_Half |
3198 ADVERTISED_100baseT_Full |
3199 ADVERTISED_1000baseT_Half |
3200 ADVERTISED_1000baseT_Full;
3201 } else {
3202 switch (vptr->options.spd_dpx) {
3203 case SPD_DPX_1000_FULL:
3204 cmd->advertising |= ADVERTISED_1000baseT_Full;
3205 break;
3206 case SPD_DPX_100_HALF:
3207 cmd->advertising |= ADVERTISED_100baseT_Half;
3208 break;
3209 case SPD_DPX_100_FULL:
3210 cmd->advertising |= ADVERTISED_100baseT_Full;
3211 break;
3212 case SPD_DPX_10_HALF:
3213 cmd->advertising |= ADVERTISED_10baseT_Half;
3214 break;
3215 case SPD_DPX_10_FULL:
3216 cmd->advertising |= ADVERTISED_10baseT_Full;
3217 break;
3218 default:
3219 break;
3220 }
3221 }
3173 if (status & VELOCITY_SPEED_1000) 3222 if (status & VELOCITY_SPEED_1000)
3174 cmd->speed = SPEED_1000; 3223 cmd->speed = SPEED_1000;
3175 else if (status & VELOCITY_SPEED_100) 3224 else if (status & VELOCITY_SPEED_100)
@@ -3200,14 +3249,35 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
3200 curr_status &= (~VELOCITY_LINK_FAIL); 3249 curr_status &= (~VELOCITY_LINK_FAIL);
3201 3250
3202 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); 3251 new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3252 new_status |= ((cmd->speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3203 new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); 3253 new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3204 new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); 3254 new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3205 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); 3255 new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3206 3256
3207 if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) 3257 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3258 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3208 ret = -EINVAL; 3259 ret = -EINVAL;
3209 else 3260 } else {
3261 enum speed_opt spd_dpx;
3262
3263 if (new_status & VELOCITY_AUTONEG_ENABLE)
3264 spd_dpx = SPD_DPX_AUTO;
3265 else if ((new_status & VELOCITY_SPEED_1000) &&
3266 (new_status & VELOCITY_DUPLEX_FULL)) {
3267 spd_dpx = SPD_DPX_1000_FULL;
3268 } else if (new_status & VELOCITY_SPEED_100)
3269 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3270 SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3271 else if (new_status & VELOCITY_SPEED_10)
3272 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3273 SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3274 else
3275 return -EOPNOTSUPP;
3276
3277 vptr->options.spd_dpx = spd_dpx;
3278
3210 velocity_set_media_mode(vptr, new_status); 3279 velocity_set_media_mode(vptr, new_status);
3280 }
3211 3281
3212 return ret; 3282 return ret;
3213} 3283}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index f7b33ae7a703..aa2e69b9ff61 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -848,7 +848,7 @@ enum velocity_owner {
848 * Bits in CHIPGCR register 848 * Bits in CHIPGCR register
849 */ 849 */
850 850
851#define CHIPGCR_FCGMII 0x80 851#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */
852#define CHIPGCR_FCFDX 0x40 852#define CHIPGCR_FCFDX 0x40
853#define CHIPGCR_FCRESV 0x20 853#define CHIPGCR_FCRESV 0x20
854#define CHIPGCR_FCMODE 0x10 854#define CHIPGCR_FCMODE 0x10
@@ -1390,7 +1390,8 @@ enum speed_opt {
1390 SPD_DPX_100_HALF = 1, 1390 SPD_DPX_100_HALF = 1,
1391 SPD_DPX_100_FULL = 2, 1391 SPD_DPX_100_FULL = 2,
1392 SPD_DPX_10_HALF = 3, 1392 SPD_DPX_10_HALF = 3,
1393 SPD_DPX_10_FULL = 4 1393 SPD_DPX_10_FULL = 4,
1394 SPD_DPX_1000_FULL = 5
1394}; 1395};
1395 1396
1396enum velocity_init_type { 1397enum velocity_init_type {
@@ -1504,22 +1505,25 @@ struct velocity_info {
1504 * addresses on this chain then we use the first - multi-IP WOL is not 1505 * addresses on this chain then we use the first - multi-IP WOL is not
1505 * supported. 1506 * supported.
1506 * 1507 *
1507 * CHECK ME: locking
1508 */ 1508 */
1509 1509
1510static inline int velocity_get_ip(struct velocity_info *vptr) 1510static inline int velocity_get_ip(struct velocity_info *vptr)
1511{ 1511{
1512 struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr; 1512 struct in_device *in_dev;
1513 struct in_ifaddr *ifa; 1513 struct in_ifaddr *ifa;
1514 int res = -ENOENT;
1514 1515
1516 rcu_read_lock();
1517 in_dev = __in_dev_get_rcu(vptr->dev);
1515 if (in_dev != NULL) { 1518 if (in_dev != NULL) {
1516 ifa = (struct in_ifaddr *) in_dev->ifa_list; 1519 ifa = (struct in_ifaddr *) in_dev->ifa_list;
1517 if (ifa != NULL) { 1520 if (ifa != NULL) {
1518 memcpy(vptr->ip_addr, &ifa->ifa_address, 4); 1521 memcpy(vptr->ip_addr, &ifa->ifa_address, 4);
1519 return 0; 1522 res = 0;
1520 } 1523 }
1521 } 1524 }
1522 return -ENOENT; 1525 rcu_read_unlock();
1526 return res;
1523} 1527}
1524 1528
1525/** 1529/**
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 198ce92af0c3..3f60e0e3097b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1548,23 +1548,6 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1548 } 1548 }
1549} 1549}
1550 1550
1551
1552inline void set_flag_le16(__le16 *data, u16 flag)
1553{
1554 *data = cpu_to_le16(le16_to_cpu(*data) | flag);
1555}
1556
1557inline void set_flag_le64(__le64 *data, u64 flag)
1558{
1559 *data = cpu_to_le64(le64_to_cpu(*data) | flag);
1560}
1561
1562inline void reset_flag_le64(__le64 *data, u64 flag)
1563{
1564 *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
1565}
1566
1567
1568static void 1551static void
1569vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 1552vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1570{ 1553{
@@ -1634,7 +1617,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1634 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; 1617 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1635 bool activeVlan = false; 1618 bool activeVlan = false;
1636 1619
1637 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1620 for (vid = 0; vid < VLAN_N_VID; vid++) {
1638 if (vlan_group_get_device(adapter->vlan_grp, vid)) { 1621 if (vlan_group_get_device(adapter->vlan_grp, vid)) {
1639 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); 1622 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1640 activeVlan = true; 1623 activeVlan = true;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 2121c735cabd..c88ea5cbba0d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -353,9 +353,20 @@ struct vmxnet3_adapter {
353#define VMXNET3_MAX_ETH_HDR_SIZE 22 353#define VMXNET3_MAX_ETH_HDR_SIZE 22
354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) 354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
355 355
356void set_flag_le16(__le16 *data, u16 flag); 356static inline void set_flag_le16(__le16 *data, u16 flag)
357void set_flag_le64(__le64 *data, u64 flag); 357{
358void reset_flag_le64(__le64 *data, u64 flag); 358 *data = cpu_to_le16(le16_to_cpu(*data) | flag);
359}
360
361static inline void set_flag_le64(__le64 *data, u64 flag)
362{
363 *data = cpu_to_le64(le64_to_cpu(*data) | flag);
364}
365
366static inline void reset_flag_le64(__le64 *data, u64 flag)
367{
368 *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
369}
359 370
360int 371int
361vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); 372vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 5378b849f54f..a69542ecb68d 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -822,7 +822,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
822 dev->name, __func__, __LINE__, 822 dev->name, __func__, __LINE__,
823 fifo_hw, dtr, dtr_priv); 823 fifo_hw, dtr, dtr_priv);
824 824
825 if (vdev->vlgrp && vlan_tx_tag_present(skb)) { 825 if (vlan_tx_tag_present(skb)) {
826 u16 vlan_tag = vlan_tx_tag_get(skb); 826 u16 vlan_tag = vlan_tx_tag_get(skb);
827 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); 827 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
828 } 828 }
@@ -1862,7 +1862,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1862 1862
1863 if (vdev->vlgrp && vpath->is_open) { 1863 if (vdev->vlgrp && vpath->is_open) {
1864 1864
1865 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1865 for (vid = 0; vid < VLAN_N_VID; vid++) {
1866 if (!vlan_group_get_device(vdev->vlgrp, vid)) 1866 if (!vlan_group_get_device(vdev->vlgrp, vid))
1867 continue; 1867 continue;
1868 /* Add these vlan to the vid table */ 1868 /* Add these vlan to the vid table */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 421d0715310e..1481a446fefb 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -97,11 +97,11 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
97 97
98 dest = skb_push(skb, hlen); 98 dest = skb_push(skb, hlen);
99 if (!dest) 99 if (!dest)
100 return(0); 100 return 0;
101 101
102 memcpy(dest, &hdr, hlen); 102 memcpy(dest, &hdr, hlen);
103 103
104 return(hlen); 104 return hlen;
105} 105}
106 106
107static void dlci_receive(struct sk_buff *skb, struct net_device *dev) 107static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
@@ -211,14 +211,14 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
211 if (copy_from_user(&config, conf, sizeof(struct dlci_conf))) 211 if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
212 return -EFAULT; 212 return -EFAULT;
213 if (config.flags & ~DLCI_VALID_FLAGS) 213 if (config.flags & ~DLCI_VALID_FLAGS)
214 return(-EINVAL); 214 return -EINVAL;
215 memcpy(&dlp->config, &config, sizeof(struct dlci_conf)); 215 memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
216 dlp->configured = 1; 216 dlp->configured = 1;
217 } 217 }
218 218
219 err = (*flp->dlci_conf)(dlp->slave, dev, get); 219 err = (*flp->dlci_conf)(dlp->slave, dev, get);
220 if (err) 220 if (err)
221 return(err); 221 return err;
222 222
223 if (get) 223 if (get)
224 { 224 {
@@ -226,7 +226,7 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
226 return -EFAULT; 226 return -EFAULT;
227 } 227 }
228 228
229 return(0); 229 return 0;
230} 230}
231 231
232static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 232static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -234,7 +234,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
234 struct dlci_local *dlp; 234 struct dlci_local *dlp;
235 235
236 if (!capable(CAP_NET_ADMIN)) 236 if (!capable(CAP_NET_ADMIN))
237 return(-EPERM); 237 return -EPERM;
238 238
239 dlp = netdev_priv(dev); 239 dlp = netdev_priv(dev);
240 240
@@ -242,7 +242,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
242 { 242 {
243 case DLCI_GET_SLAVE: 243 case DLCI_GET_SLAVE:
244 if (!*(short *)(dev->dev_addr)) 244 if (!*(short *)(dev->dev_addr))
245 return(-EINVAL); 245 return -EINVAL;
246 246
247 strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave)); 247 strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
248 break; 248 break;
@@ -250,15 +250,15 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
250 case DLCI_GET_CONF: 250 case DLCI_GET_CONF:
251 case DLCI_SET_CONF: 251 case DLCI_SET_CONF:
252 if (!*(short *)(dev->dev_addr)) 252 if (!*(short *)(dev->dev_addr))
253 return(-EINVAL); 253 return -EINVAL;
254 254
255 return(dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF)); 255 return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
256 break; 256 break;
257 257
258 default: 258 default:
259 return(-EOPNOTSUPP); 259 return -EOPNOTSUPP;
260 } 260 }
261 return(0); 261 return 0;
262} 262}
263 263
264static int dlci_change_mtu(struct net_device *dev, int new_mtu) 264static int dlci_change_mtu(struct net_device *dev, int new_mtu)
@@ -277,15 +277,15 @@ static int dlci_open(struct net_device *dev)
277 dlp = netdev_priv(dev); 277 dlp = netdev_priv(dev);
278 278
279 if (!*(short *)(dev->dev_addr)) 279 if (!*(short *)(dev->dev_addr))
280 return(-EINVAL); 280 return -EINVAL;
281 281
282 if (!netif_running(dlp->slave)) 282 if (!netif_running(dlp->slave))
283 return(-ENOTCONN); 283 return -ENOTCONN;
284 284
285 flp = netdev_priv(dlp->slave); 285 flp = netdev_priv(dlp->slave);
286 err = (*flp->activate)(dlp->slave, dev); 286 err = (*flp->activate)(dlp->slave, dev);
287 if (err) 287 if (err)
288 return(err); 288 return err;
289 289
290 netif_start_queue(dev); 290 netif_start_queue(dev);
291 291
@@ -365,14 +365,14 @@ static int dlci_add(struct dlci_add *dlci)
365 list_add(&dlp->list, &dlci_devs); 365 list_add(&dlp->list, &dlci_devs);
366 rtnl_unlock(); 366 rtnl_unlock();
367 367
368 return(0); 368 return 0;
369 369
370 err2: 370 err2:
371 rtnl_unlock(); 371 rtnl_unlock();
372 free_netdev(master); 372 free_netdev(master);
373 err1: 373 err1:
374 dev_put(slave); 374 dev_put(slave);
375 return(err); 375 return err;
376} 376}
377 377
378static int dlci_del(struct dlci_add *dlci) 378static int dlci_del(struct dlci_add *dlci)
@@ -385,10 +385,10 @@ static int dlci_del(struct dlci_add *dlci)
385 /* validate slave device */ 385 /* validate slave device */
386 master = __dev_get_by_name(&init_net, dlci->devname); 386 master = __dev_get_by_name(&init_net, dlci->devname);
387 if (!master) 387 if (!master)
388 return(-ENODEV); 388 return -ENODEV;
389 389
390 if (netif_running(master)) { 390 if (netif_running(master)) {
391 return(-EBUSY); 391 return -EBUSY;
392 } 392 }
393 393
394 dlp = netdev_priv(master); 394 dlp = netdev_priv(master);
@@ -406,7 +406,7 @@ static int dlci_del(struct dlci_add *dlci)
406 } 406 }
407 rtnl_unlock(); 407 rtnl_unlock();
408 408
409 return(err); 409 return err;
410} 410}
411 411
412static int dlci_ioctl(unsigned int cmd, void __user *arg) 412static int dlci_ioctl(unsigned int cmd, void __user *arg)
@@ -415,7 +415,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
415 int err; 415 int err;
416 416
417 if (!capable(CAP_NET_ADMIN)) 417 if (!capable(CAP_NET_ADMIN))
418 return(-EPERM); 418 return -EPERM;
419 419
420 if (copy_from_user(&add, arg, sizeof(struct dlci_add))) 420 if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
421 return -EFAULT; 421 return -EFAULT;
@@ -438,7 +438,7 @@ static int dlci_ioctl(unsigned int cmd, void __user *arg)
438 err = -EINVAL; 438 err = -EINVAL;
439 } 439 }
440 440
441 return(err); 441 return err;
442} 442}
443 443
444static const struct header_ops dlci_header_ops = { 444static const struct header_ops dlci_header_ops = {
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index b38ffa149aba..b1e5e5b69c2a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -191,7 +191,8 @@ static int cisco_rx(struct sk_buff *skb)
191 191
192 switch (ntohl (cisco_data->type)) { 192 switch (ntohl (cisco_data->type)) {
193 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */ 193 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
194 in_dev = dev->ip_ptr; 194 rcu_read_lock();
195 in_dev = __in_dev_get_rcu(dev);
195 addr = 0; 196 addr = 0;
196 mask = ~cpu_to_be32(0); /* is the mask correct? */ 197 mask = ~cpu_to_be32(0); /* is the mask correct? */
197 198
@@ -211,6 +212,7 @@ static int cisco_rx(struct sk_buff *skb)
211 cisco_keepalive_send(dev, CISCO_ADDR_REPLY, 212 cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
212 addr, mask); 213 addr, mask);
213 } 214 }
215 rcu_read_unlock();
214 dev_kfree_skb_any(skb); 216 dev_kfree_skb_any(skb);
215 return NET_RX_SUCCESS; 217 return NET_RX_SUCCESS;
216 218
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 43af85b8e45e..70feb84df670 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1022,7 +1022,7 @@ static int lmc_open(struct net_device *dev)
1022 1022
1023 if (sc->lmc_ok){ 1023 if (sc->lmc_ok){
1024 lmc_trace(dev, "lmc_open lmc_ok out"); 1024 lmc_trace(dev, "lmc_open lmc_ok out");
1025 return (0); 1025 return 0;
1026 } 1026 }
1027 1027
1028 lmc_softreset (sc); 1028 lmc_softreset (sc);
@@ -1110,7 +1110,7 @@ static int lmc_open(struct net_device *dev)
1110 1110
1111 lmc_trace(dev, "lmc_open out"); 1111 lmc_trace(dev, "lmc_open out");
1112 1112
1113 return (0); 1113 return 0;
1114} 1114}
1115 1115
1116/* Total reset to compensate for the AdTran DSU doing bad things 1116/* Total reset to compensate for the AdTran DSU doing bad things
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 7a3720f09ce3..17d408fe693f 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -379,14 +379,14 @@ static int __init n2_run(unsigned long io, unsigned long irq,
379 if (request_irq(irq, sca_intr, 0, devname, card)) { 379 if (request_irq(irq, sca_intr, 0, devname, card)) {
380 printk(KERN_ERR "n2: could not allocate IRQ\n"); 380 printk(KERN_ERR "n2: could not allocate IRQ\n");
381 n2_destroy_card(card); 381 n2_destroy_card(card);
382 return(-EBUSY); 382 return -EBUSY;
383 } 383 }
384 card->irq = irq; 384 card->irq = irq;
385 385
386 if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) { 386 if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
387 printk(KERN_ERR "n2: could not request RAM window\n"); 387 printk(KERN_ERR "n2: could not request RAM window\n");
388 n2_destroy_card(card); 388 n2_destroy_card(card);
389 return(-EBUSY); 389 return -EBUSY;
390 } 390 }
391 card->phy_winbase = winbase; 391 card->phy_winbase = winbase;
392 card->winbase = ioremap(winbase, USE_WINDOWSIZE); 392 card->winbase = ioremap(winbase, USE_WINDOWSIZE);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index fbf1175a07f1..f875cfae3093 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -451,11 +451,11 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch)
451 if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) { 451 if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) {
452 /* Return the size of a good frame or incomplete bad frame 452 /* Return the size of a good frame or incomplete bad frame
453 * (dma_buf_read will clean the buffer descriptors in this case). */ 453 * (dma_buf_read will clean the buffer descriptors in this case). */
454 return (rcvd); 454 return rcvd;
455 } 455 }
456 ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next)); 456 ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
457 } 457 }
458 return (-1); 458 return -1;
459} 459}
460 460
461/* 461/*
@@ -557,7 +557,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
557 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch), 557 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
558 RX_BD_ADDR(ch, chan->rx_last_bd)); 558 RX_BD_ADDR(ch, chan->rx_last_bd));
559 } 559 }
560 return (rcvd); 560 return rcvd;
561} 561}
562 562
563static void tx_dma_stop(pc300_t * card, int ch) 563static void tx_dma_stop(pc300_t * card, int ch)
@@ -1733,7 +1733,7 @@ static u16 falc_pattern_test_error(pc300_t * card, int ch)
1733 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1733 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1734 falc_t *pfalc = (falc_t *) & chan->falc; 1734 falc_t *pfalc = (falc_t *) & chan->falc;
1735 1735
1736 return (pfalc->bec); 1736 return pfalc->bec;
1737} 1737}
1738 1738
1739/**********************************/ 1739/**********************************/
@@ -2819,7 +2819,7 @@ static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2819 *br_io = 0; 2819 *br_io = 0;
2820 2820
2821 if (rate == 0) 2821 if (rate == 0)
2822 return (0); 2822 return 0;
2823 2823
2824 for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) { 2824 for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
2825 if ((tc = clock / br_pwr / rate) <= 0xff) { 2825 if ((tc = clock / br_pwr / rate) <= 0xff) {
@@ -2832,11 +2832,11 @@ static int clock_rate_calc(u32 rate, u32 clock, int *br_io)
2832 error = ((rate - (clock / br_pwr / rate)) / rate) * 1000; 2832 error = ((rate - (clock / br_pwr / rate)) / rate) * 1000;
2833 /* Errors bigger than +/- 1% won't be tolerated */ 2833 /* Errors bigger than +/- 1% won't be tolerated */
2834 if (error < -10 || error > 10) 2834 if (error < -10 || error > 10)
2835 return (-1); 2835 return -1;
2836 else 2836 else
2837 return (tc); 2837 return tc;
2838 } else { 2838 } else {
2839 return (-1); 2839 return -1;
2840 } 2840 }
2841} 2841}
2842 2842
@@ -3207,7 +3207,7 @@ static u32 detect_ram(pc300_t * card)
3207 break; 3207 break;
3208 } 3208 }
3209 } 3209 }
3210 return (i); 3210 return i;
3211} 3211}
3212 3212
3213static void plx_init(pc300_t * card) 3213static void plx_init(pc300_t * card)
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4293889e287e..515d9b8af01e 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -540,7 +540,7 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
540 return -ENODEV; 540 return -ENODEV;
541 } 541 }
542 542
543 return(0); 543 return 0;
544} 544}
545 545
546static int pc300_tiocmset(struct tty_struct *tty, struct file *file, 546static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index f4125da2762f..3f4e2b5684db 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -178,7 +178,7 @@ static char sdla_byte(struct net_device *dev, int addr)
178 byte = *temp; 178 byte = *temp;
179 spin_unlock_irqrestore(&sdla_lock, flags); 179 spin_unlock_irqrestore(&sdla_lock, flags);
180 180
181 return(byte); 181 return byte;
182} 182}
183 183
184static void sdla_stop(struct net_device *dev) 184static void sdla_stop(struct net_device *dev)
@@ -267,7 +267,7 @@ static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char r
267 resp = *temp; 267 resp = *temp;
268 } 268 }
269 } 269 }
270 return(time_before(jiffies, done) ? jiffies - start : -1); 270 return time_before(jiffies, done) ? jiffies - start : -1;
271} 271}
272 272
273/* constants for Z80 CPU speed */ 273/* constants for Z80 CPU speed */
@@ -283,13 +283,13 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
283 283
284 sdla_start(dev); 284 sdla_start(dev);
285 if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0) 285 if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
286 return(-EIO); 286 return -EIO;
287 287
288 data = LOADER_READY; 288 data = LOADER_READY;
289 sdla_write(dev, 0, &data, 1); 289 sdla_write(dev, 0, &data, 1);
290 290
291 if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0) 291 if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
292 return(-EIO); 292 return -EIO;
293 293
294 sdla_stop(dev); 294 sdla_stop(dev);
295 sdla_read(dev, 0, &data, 1); 295 sdla_read(dev, 0, &data, 1);
@@ -297,11 +297,11 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
297 if (data == Z80_SCC_BAD) 297 if (data == Z80_SCC_BAD)
298 { 298 {
299 printk("%s: SCC bad\n", dev->name); 299 printk("%s: SCC bad\n", dev->name);
300 return(-EIO); 300 return -EIO;
301 } 301 }
302 302
303 if (data != Z80_SCC_OK) 303 if (data != Z80_SCC_OK)
304 return(-EINVAL); 304 return -EINVAL;
305 305
306 if (jiffs < 165) 306 if (jiffs < 165)
307 ifr->ifr_mtu = SDLA_CPU_16M; 307 ifr->ifr_mtu = SDLA_CPU_16M;
@@ -316,7 +316,7 @@ static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
316 else 316 else
317 ifr->ifr_mtu = SDLA_CPU_3M; 317 ifr->ifr_mtu = SDLA_CPU_3M;
318 318
319 return(0); 319 return 0;
320} 320}
321 321
322/************************************************ 322/************************************************
@@ -493,7 +493,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
493 if (ret != SDLA_RET_OK) 493 if (ret != SDLA_RET_OK)
494 sdla_errors(dev, cmd, dlci, ret, len, &status); 494 sdla_errors(dev, cmd, dlci, ret, len, &status);
495 495
496 return(ret); 496 return ret;
497} 497}
498 498
499/*********************************************** 499/***********************************************
@@ -516,14 +516,14 @@ static int sdla_activate(struct net_device *slave, struct net_device *master)
516 break; 516 break;
517 517
518 if (i == CONFIG_DLCI_MAX) 518 if (i == CONFIG_DLCI_MAX)
519 return(-ENODEV); 519 return -ENODEV;
520 520
521 flp->dlci[i] = abs(flp->dlci[i]); 521 flp->dlci[i] = abs(flp->dlci[i]);
522 522
523 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) 523 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
524 sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); 524 sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
525 525
526 return(0); 526 return 0;
527} 527}
528 528
529static int sdla_deactivate(struct net_device *slave, struct net_device *master) 529static int sdla_deactivate(struct net_device *slave, struct net_device *master)
@@ -538,14 +538,14 @@ static int sdla_deactivate(struct net_device *slave, struct net_device *master)
538 break; 538 break;
539 539
540 if (i == CONFIG_DLCI_MAX) 540 if (i == CONFIG_DLCI_MAX)
541 return(-ENODEV); 541 return -ENODEV;
542 542
543 flp->dlci[i] = -abs(flp->dlci[i]); 543 flp->dlci[i] = -abs(flp->dlci[i]);
544 544
545 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) 545 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
546 sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); 546 sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
547 547
548 return(0); 548 return 0;
549} 549}
550 550
551static int sdla_assoc(struct net_device *slave, struct net_device *master) 551static int sdla_assoc(struct net_device *slave, struct net_device *master)
@@ -554,7 +554,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
554 int i; 554 int i;
555 555
556 if (master->type != ARPHRD_DLCI) 556 if (master->type != ARPHRD_DLCI)
557 return(-EINVAL); 557 return -EINVAL;
558 558
559 flp = netdev_priv(slave); 559 flp = netdev_priv(slave);
560 560
@@ -563,11 +563,11 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
563 if (!flp->master[i]) 563 if (!flp->master[i])
564 break; 564 break;
565 if (abs(flp->dlci[i]) == *(short *)(master->dev_addr)) 565 if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
566 return(-EADDRINUSE); 566 return -EADDRINUSE;
567 } 567 }
568 568
569 if (i == CONFIG_DLCI_MAX) 569 if (i == CONFIG_DLCI_MAX)
570 return(-EMLINK); /* #### Alan: Comments on this ?? */ 570 return -EMLINK; /* #### Alan: Comments on this ?? */
571 571
572 572
573 flp->master[i] = master; 573 flp->master[i] = master;
@@ -581,7 +581,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
581 sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); 581 sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
582 } 582 }
583 583
584 return(0); 584 return 0;
585} 585}
586 586
587static int sdla_deassoc(struct net_device *slave, struct net_device *master) 587static int sdla_deassoc(struct net_device *slave, struct net_device *master)
@@ -596,7 +596,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
596 break; 596 break;
597 597
598 if (i == CONFIG_DLCI_MAX) 598 if (i == CONFIG_DLCI_MAX)
599 return(-ENODEV); 599 return -ENODEV;
600 600
601 flp->master[i] = NULL; 601 flp->master[i] = NULL;
602 flp->dlci[i] = 0; 602 flp->dlci[i] = 0;
@@ -609,7 +609,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
609 sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); 609 sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
610 } 610 }
611 611
612 return(0); 612 return 0;
613} 613}
614 614
615static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) 615static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
@@ -626,7 +626,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
626 break; 626 break;
627 627
628 if (i == CONFIG_DLCI_MAX) 628 if (i == CONFIG_DLCI_MAX)
629 return(-ENODEV); 629 return -ENODEV;
630 630
631 dlp = netdev_priv(master); 631 dlp = netdev_priv(master);
632 632
@@ -641,7 +641,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
641 &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL); 641 &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
642 } 642 }
643 643
644 return(ret == SDLA_RET_OK ? 0 : -EIO); 644 return ret == SDLA_RET_OK ? 0 : -EIO;
645} 645}
646 646
647/************************** 647/**************************
@@ -986,7 +986,7 @@ static int sdla_close(struct net_device *dev)
986 986
987 netif_stop_queue(dev); 987 netif_stop_queue(dev);
988 988
989 return(0); 989 return 0;
990} 990}
991 991
992struct conf_data { 992struct conf_data {
@@ -1006,10 +1006,10 @@ static int sdla_open(struct net_device *dev)
1006 flp = netdev_priv(dev); 1006 flp = netdev_priv(dev);
1007 1007
1008 if (!flp->initialized) 1008 if (!flp->initialized)
1009 return(-EPERM); 1009 return -EPERM;
1010 1010
1011 if (!flp->configured) 1011 if (!flp->configured)
1012 return(-EPERM); 1012 return -EPERM;
1013 1013
1014 /* time to send in the configuration */ 1014 /* time to send in the configuration */
1015 len = 0; 1015 len = 0;
@@ -1087,7 +1087,7 @@ static int sdla_open(struct net_device *dev)
1087 1087
1088 netif_start_queue(dev); 1088 netif_start_queue(dev);
1089 1089
1090 return(0); 1090 return 0;
1091} 1091}
1092 1092
1093static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get) 1093static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
@@ -1098,48 +1098,48 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1098 short size; 1098 short size;
1099 1099
1100 if (dev->type == 0xFFFF) 1100 if (dev->type == 0xFFFF)
1101 return(-EUNATCH); 1101 return -EUNATCH;
1102 1102
1103 flp = netdev_priv(dev); 1103 flp = netdev_priv(dev);
1104 1104
1105 if (!get) 1105 if (!get)
1106 { 1106 {
1107 if (netif_running(dev)) 1107 if (netif_running(dev))
1108 return(-EBUSY); 1108 return -EBUSY;
1109 1109
1110 if(copy_from_user(&data.config, conf, sizeof(struct frad_conf))) 1110 if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
1111 return -EFAULT; 1111 return -EFAULT;
1112 1112
1113 if (data.config.station & ~FRAD_STATION_NODE) 1113 if (data.config.station & ~FRAD_STATION_NODE)
1114 return(-EINVAL); 1114 return -EINVAL;
1115 1115
1116 if (data.config.flags & ~FRAD_VALID_FLAGS) 1116 if (data.config.flags & ~FRAD_VALID_FLAGS)
1117 return(-EINVAL); 1117 return -EINVAL;
1118 1118
1119 if ((data.config.kbaud < 0) || 1119 if ((data.config.kbaud < 0) ||
1120 ((data.config.kbaud > 128) && (flp->type != SDLA_S508))) 1120 ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
1121 return(-EINVAL); 1121 return -EINVAL;
1122 1122
1123 if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232)) 1123 if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
1124 return(-EINVAL); 1124 return -EINVAL;
1125 1125
1126 if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU)) 1126 if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
1127 return(-EINVAL); 1127 return -EINVAL;
1128 1128
1129 if ((data.config.T391 < 5) || (data.config.T391 > 30)) 1129 if ((data.config.T391 < 5) || (data.config.T391 > 30))
1130 return(-EINVAL); 1130 return -EINVAL;
1131 1131
1132 if ((data.config.T392 < 5) || (data.config.T392 > 30)) 1132 if ((data.config.T392 < 5) || (data.config.T392 > 30))
1133 return(-EINVAL); 1133 return -EINVAL;
1134 1134
1135 if ((data.config.N391 < 1) || (data.config.N391 > 255)) 1135 if ((data.config.N391 < 1) || (data.config.N391 > 255))
1136 return(-EINVAL); 1136 return -EINVAL;
1137 1137
1138 if ((data.config.N392 < 1) || (data.config.N392 > 10)) 1138 if ((data.config.N392 < 1) || (data.config.N392 > 10))
1139 return(-EINVAL); 1139 return -EINVAL;
1140 1140
1141 if ((data.config.N393 < 1) || (data.config.N393 > 10)) 1141 if ((data.config.N393 < 1) || (data.config.N393 > 10))
1142 return(-EINVAL); 1142 return -EINVAL;
1143 1143
1144 memcpy(&flp->config, &data.config, sizeof(struct frad_conf)); 1144 memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
1145 flp->config.flags |= SDLA_DIRECT_RECV; 1145 flp->config.flags |= SDLA_DIRECT_RECV;
@@ -1171,7 +1171,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1171 { 1171 {
1172 size = sizeof(data); 1172 size = sizeof(data);
1173 if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK) 1173 if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
1174 return(-EIO); 1174 return -EIO;
1175 } 1175 }
1176 else 1176 else
1177 if (flp->configured) 1177 if (flp->configured)
@@ -1185,7 +1185,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1185 return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0; 1185 return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
1186 } 1186 }
1187 1187
1188 return(0); 1188 return 0;
1189} 1189}
1190 1190
1191static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read) 1191static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
@@ -1200,7 +1200,7 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
1200 { 1200 {
1201 temp = kzalloc(mem.len, GFP_KERNEL); 1201 temp = kzalloc(mem.len, GFP_KERNEL);
1202 if (!temp) 1202 if (!temp)
1203 return(-ENOMEM); 1203 return -ENOMEM;
1204 sdla_read(dev, mem.addr, temp, mem.len); 1204 sdla_read(dev, mem.addr, temp, mem.len);
1205 if(copy_to_user(mem.data, temp, mem.len)) 1205 if(copy_to_user(mem.data, temp, mem.len))
1206 { 1206 {
@@ -1217,7 +1217,7 @@ static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int r
1217 sdla_write(dev, mem.addr, temp, mem.len); 1217 sdla_write(dev, mem.addr, temp, mem.len);
1218 kfree(temp); 1218 kfree(temp);
1219 } 1219 }
1220 return(0); 1220 return 0;
1221} 1221}
1222 1222
1223static int sdla_reconfig(struct net_device *dev) 1223static int sdla_reconfig(struct net_device *dev)
@@ -1241,7 +1241,7 @@ static int sdla_reconfig(struct net_device *dev)
1241 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL); 1241 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
1242 sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); 1242 sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
1243 1243
1244 return(0); 1244 return 0;
1245} 1245}
1246 1246
1247static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1247static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1254,20 +1254,20 @@ static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1254 flp = netdev_priv(dev); 1254 flp = netdev_priv(dev);
1255 1255
1256 if (!flp->initialized) 1256 if (!flp->initialized)
1257 return(-EINVAL); 1257 return -EINVAL;
1258 1258
1259 switch (cmd) 1259 switch (cmd)
1260 { 1260 {
1261 case FRAD_GET_CONF: 1261 case FRAD_GET_CONF:
1262 case FRAD_SET_CONF: 1262 case FRAD_SET_CONF:
1263 return(sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF)); 1263 return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF);
1264 1264
1265 case SDLA_IDENTIFY: 1265 case SDLA_IDENTIFY:
1266 ifr->ifr_flags = flp->type; 1266 ifr->ifr_flags = flp->type;
1267 break; 1267 break;
1268 1268
1269 case SDLA_CPUSPEED: 1269 case SDLA_CPUSPEED:
1270 return(sdla_cpuspeed(dev, ifr)); 1270 return sdla_cpuspeed(dev, ifr);
1271 1271
1272/* ========================================================== 1272/* ==========================================================
1273NOTE: This is rather a useless action right now, as the 1273NOTE: This is rather a useless action right now, as the
@@ -1277,7 +1277,7 @@ NOTE: This is rather a useless action right now, as the
1277============================================================*/ 1277============================================================*/
1278 case SDLA_PROTOCOL: 1278 case SDLA_PROTOCOL:
1279 if (flp->configured) 1279 if (flp->configured)
1280 return(-EALREADY); 1280 return -EALREADY;
1281 1281
1282 switch (ifr->ifr_flags) 1282 switch (ifr->ifr_flags)
1283 { 1283 {
@@ -1285,7 +1285,7 @@ NOTE: This is rather a useless action right now, as the
1285 dev->type = ifr->ifr_flags; 1285 dev->type = ifr->ifr_flags;
1286 break; 1286 break;
1287 default: 1287 default:
1288 return(-ENOPROTOOPT); 1288 return -ENOPROTOOPT;
1289 } 1289 }
1290 break; 1290 break;
1291 1291
@@ -1297,7 +1297,7 @@ NOTE: This is rather a useless action right now, as the
1297 case SDLA_READMEM: 1297 case SDLA_READMEM:
1298 if(!capable(CAP_SYS_RAWIO)) 1298 if(!capable(CAP_SYS_RAWIO))
1299 return -EPERM; 1299 return -EPERM;
1300 return(sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM)); 1300 return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM);
1301 1301
1302 case SDLA_START: 1302 case SDLA_START:
1303 sdla_start(dev); 1303 sdla_start(dev);
@@ -1308,9 +1308,9 @@ NOTE: This is rather a useless action right now, as the
1308 break; 1308 break;
1309 1309
1310 default: 1310 default:
1311 return(-EOPNOTSUPP); 1311 return -EOPNOTSUPP;
1312 } 1312 }
1313 return(0); 1313 return 0;
1314} 1314}
1315 1315
1316static int sdla_change_mtu(struct net_device *dev, int new_mtu) 1316static int sdla_change_mtu(struct net_device *dev, int new_mtu)
@@ -1320,10 +1320,10 @@ static int sdla_change_mtu(struct net_device *dev, int new_mtu)
1320 flp = netdev_priv(dev); 1320 flp = netdev_priv(dev);
1321 1321
1322 if (netif_running(dev)) 1322 if (netif_running(dev))
1323 return(-EBUSY); 1323 return -EBUSY;
1324 1324
1325 /* for now, you can't change the MTU! */ 1325 /* for now, you can't change the MTU! */
1326 return(-EOPNOTSUPP); 1326 return -EOPNOTSUPP;
1327} 1327}
1328 1328
1329static int sdla_set_config(struct net_device *dev, struct ifmap *map) 1329static int sdla_set_config(struct net_device *dev, struct ifmap *map)
@@ -1337,18 +1337,18 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1337 flp = netdev_priv(dev); 1337 flp = netdev_priv(dev);
1338 1338
1339 if (flp->initialized) 1339 if (flp->initialized)
1340 return(-EINVAL); 1340 return -EINVAL;
1341 1341
1342 for(i=0; i < ARRAY_SIZE(valid_port); i++) 1342 for(i=0; i < ARRAY_SIZE(valid_port); i++)
1343 if (valid_port[i] == map->base_addr) 1343 if (valid_port[i] == map->base_addr)
1344 break; 1344 break;
1345 1345
1346 if (i == ARRAY_SIZE(valid_port)) 1346 if (i == ARRAY_SIZE(valid_port))
1347 return(-EINVAL); 1347 return -EINVAL;
1348 1348
1349 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ 1349 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
1350 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr); 1350 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
1351 return(-EINVAL); 1351 return -EINVAL;
1352 } 1352 }
1353 base = map->base_addr; 1353 base = map->base_addr;
1354 1354
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index e47f5a986b1c..d81ad8397885 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -648,7 +648,7 @@ static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
648 } 648 }
649 } 649 }
650 *ptr++ = X25_END; 650 *ptr++ = X25_END;
651 return (ptr - d); 651 return ptr - d;
652} 652}
653 653
654static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) 654static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 9fb03082153a..12b84ed0e38a 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(power_save_disabled,
98 "False by default (so the device is told to do power " 98 "False by default (so the device is told to do power "
99 "saving)."); 99 "saving).");
100 100
101int i2400m_passive_mode; /* 0 (passive mode disabled) by default */ 101static int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
102module_param_named(passive_mode, i2400m_passive_mode, int, 0644); 102module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
103MODULE_PARM_DESC(passive_mode, 103MODULE_PARM_DESC(passive_mode,
104 "If true, the driver will not do any device setup " 104 "If true, the driver will not do any device setup "
@@ -558,8 +558,9 @@ void i2400m_report_hook(struct i2400m *i2400m,
558 * processing should be done in the function that calls the 558 * processing should be done in the function that calls the
559 * command. This is here for some cases where it can't happen... 559 * command. This is here for some cases where it can't happen...
560 */ 560 */
561void i2400m_msg_ack_hook(struct i2400m *i2400m, 561static void i2400m_msg_ack_hook(struct i2400m *i2400m,
562 const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size) 562 const struct i2400m_l3l4_hdr *l3l4_hdr,
563 size_t size)
563{ 564{
564 int result; 565 int result;
565 struct device *dev = i2400m_dev(i2400m); 566 struct device *dev = i2400m_dev(i2400m);
@@ -1135,7 +1136,7 @@ error_alloc:
1135 * i2400m_report_state_hook() to parse the answer. This will set the 1136 * i2400m_report_state_hook() to parse the answer. This will set the
1136 * carrier state, as well as the RF Kill switches state. 1137 * carrier state, as well as the RF Kill switches state.
1137 */ 1138 */
1138int i2400m_cmd_get_state(struct i2400m *i2400m) 1139static int i2400m_cmd_get_state(struct i2400m *i2400m)
1139{ 1140{
1140 int result; 1141 int result;
1141 struct device *dev = i2400m_dev(i2400m); 1142 struct device *dev = i2400m_dev(i2400m);
@@ -1177,8 +1178,6 @@ error_msg_to_dev:
1177error_alloc: 1178error_alloc:
1178 return result; 1179 return result;
1179} 1180}
1180EXPORT_SYMBOL_GPL(i2400m_cmd_get_state);
1181
1182 1181
1183/** 1182/**
1184 * Set basic configuration settings 1183 * Set basic configuration settings
@@ -1190,8 +1189,9 @@ EXPORT_SYMBOL_GPL(i2400m_cmd_get_state);
1190 * right endianess (LE). 1189 * right endianess (LE).
1191 * @arg_size: number of pointers in the @args array 1190 * @arg_size: number of pointers in the @args array
1192 */ 1191 */
1193int i2400m_set_init_config(struct i2400m *i2400m, 1192static int i2400m_set_init_config(struct i2400m *i2400m,
1194 const struct i2400m_tlv_hdr **arg, size_t args) 1193 const struct i2400m_tlv_hdr **arg,
1194 size_t args)
1195{ 1195{
1196 int result; 1196 int result;
1197 struct device *dev = i2400m_dev(i2400m); 1197 struct device *dev = i2400m_dev(i2400m);
@@ -1258,8 +1258,6 @@ none:
1258 return result; 1258 return result;
1259 1259
1260} 1260}
1261EXPORT_SYMBOL_GPL(i2400m_set_init_config);
1262
1263 1261
1264/** 1262/**
1265 * i2400m_set_idle_timeout - Set the device's idle mode timeout 1263 * i2400m_set_idle_timeout - Set the device's idle mode timeout
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c8b78d4abd2..cdedab46ba21 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -122,7 +122,7 @@ struct i2400m_work *__i2400m_work_setup(
122 * works struct was already queued, but we have just allocated it, so 122 * works struct was already queued, but we have just allocated it, so
123 * it should not happen. 123 * it should not happen.
124 */ 124 */
125int i2400m_schedule_work(struct i2400m *i2400m, 125static int i2400m_schedule_work(struct i2400m *i2400m,
126 void (*fn)(struct work_struct *), gfp_t gfp_flags, 126 void (*fn)(struct work_struct *), gfp_t gfp_flags,
127 const void *pl, size_t pl_size) 127 const void *pl, size_t pl_size)
128{ 128{
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index 360d4fb195f4..1d63ffdedfde 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -140,7 +140,6 @@ void i2400ms_init(struct i2400ms *i2400ms)
140 140
141extern int i2400ms_rx_setup(struct i2400ms *); 141extern int i2400ms_rx_setup(struct i2400ms *);
142extern void i2400ms_rx_release(struct i2400ms *); 142extern void i2400ms_rx_release(struct i2400ms *);
143extern ssize_t __i2400ms_rx_get_size(struct i2400ms *);
144 143
145extern int i2400ms_tx_setup(struct i2400ms *); 144extern int i2400ms_tx_setup(struct i2400ms *);
146extern void i2400ms_tx_release(struct i2400ms *); 145extern void i2400ms_tx_release(struct i2400ms *);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index fa74777fd65f..59ac7705e76e 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -910,28 +910,19 @@ struct i2400m_work {
910 u8 pl[0]; 910 u8 pl[0];
911}; 911};
912 912
913extern int i2400m_schedule_work(struct i2400m *,
914 void (*)(struct work_struct *), gfp_t,
915 const void *, size_t);
916
917extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, 913extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
918 char *, size_t); 914 char *, size_t);
919extern int i2400m_msg_size_check(struct i2400m *, 915extern int i2400m_msg_size_check(struct i2400m *,
920 const struct i2400m_l3l4_hdr *, size_t); 916 const struct i2400m_l3l4_hdr *, size_t);
921extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t); 917extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
922extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int); 918extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
923extern void i2400m_msg_ack_hook(struct i2400m *,
924 const struct i2400m_l3l4_hdr *, size_t);
925extern void i2400m_report_hook(struct i2400m *, 919extern void i2400m_report_hook(struct i2400m *,
926 const struct i2400m_l3l4_hdr *, size_t); 920 const struct i2400m_l3l4_hdr *, size_t);
927extern void i2400m_report_hook_work(struct work_struct *); 921extern void i2400m_report_hook_work(struct work_struct *);
928extern int i2400m_cmd_enter_powersave(struct i2400m *); 922extern int i2400m_cmd_enter_powersave(struct i2400m *);
929extern int i2400m_cmd_get_state(struct i2400m *);
930extern int i2400m_cmd_exit_idle(struct i2400m *); 923extern int i2400m_cmd_exit_idle(struct i2400m *);
931extern struct sk_buff *i2400m_get_device_info(struct i2400m *); 924extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
932extern int i2400m_firmware_check(struct i2400m *); 925extern int i2400m_firmware_check(struct i2400m *);
933extern int i2400m_set_init_config(struct i2400m *,
934 const struct i2400m_tlv_hdr **, size_t);
935extern int i2400m_set_idle_timeout(struct i2400m *, unsigned); 926extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
936 927
937static inline 928static inline
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 8cc9e319f435..844133b44af0 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -922,7 +922,7 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
922 * rx_roq_refcount becomes zero. This routine gets executed when 922 * rx_roq_refcount becomes zero. This routine gets executed when
923 * rx_roq_refcount becomes zero. 923 * rx_roq_refcount becomes zero.
924 */ 924 */
925void i2400m_rx_roq_destroy(struct kref *ref) 925static void i2400m_rx_roq_destroy(struct kref *ref)
926{ 926{
927 unsigned itr; 927 unsigned itr;
928 struct i2400m *i2400m 928 struct i2400m *i2400m
@@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1244 int i, result; 1244 int i, result;
1245 struct device *dev = i2400m_dev(i2400m); 1245 struct device *dev = i2400m_dev(i2400m);
1246 const struct i2400m_msg_hdr *msg_hdr; 1246 const struct i2400m_msg_hdr *msg_hdr;
1247 size_t pl_itr, pl_size, skb_len; 1247 size_t pl_itr, pl_size;
1248 unsigned long flags; 1248 unsigned long flags;
1249 unsigned num_pls, single_last; 1249 unsigned num_pls, single_last, skb_len;
1250 1250
1251 skb_len = skb->len; 1251 skb_len = skb->len;
1252 d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n", 1252 d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
1253 i2400m, skb, skb_len); 1253 i2400m, skb, skb_len);
1254 result = -EIO; 1254 result = -EIO;
1255 msg_hdr = (void *) skb->data; 1255 msg_hdr = (void *) skb->data;
1256 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len); 1256 result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
1257 if (result < 0) 1257 if (result < 0)
1258 goto error_msg_hdr_check; 1258 goto error_msg_hdr_check;
1259 result = -EIO; 1259 result = -EIO;
@@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1261 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ 1261 pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */
1262 num_pls * sizeof(msg_hdr->pld[0]); 1262 num_pls * sizeof(msg_hdr->pld[0]);
1263 pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); 1263 pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
1264 if (pl_itr > skb->len) { /* got all the payload descriptors? */ 1264 if (pl_itr > skb_len) { /* got all the payload descriptors? */
1265 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " 1265 dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
1266 "%u payload descriptors (%zu each, total %zu)\n", 1266 "%u payload descriptors (%zu each, total %zu)\n",
1267 skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); 1267 skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
1268 goto error_pl_descr_short; 1268 goto error_pl_descr_short;
1269 } 1269 }
1270 /* Walk each payload payload--check we really got it */ 1270 /* Walk each payload payload--check we really got it */
@@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1272 /* work around old gcc warnings */ 1272 /* work around old gcc warnings */
1273 pl_size = i2400m_pld_size(&msg_hdr->pld[i]); 1273 pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
1274 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], 1274 result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
1275 pl_itr, skb->len); 1275 pl_itr, skb_len);
1276 if (result < 0) 1276 if (result < 0)
1277 goto error_pl_descr_check; 1277 goto error_pl_descr_check;
1278 single_last = num_pls == 1 || i == num_pls - 1; 1278 single_last = num_pls == 1 || i == num_pls - 1;
@@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
1290 if (i < i2400m->rx_pl_min) 1290 if (i < i2400m->rx_pl_min)
1291 i2400m->rx_pl_min = i; 1291 i2400m->rx_pl_min = i;
1292 i2400m->rx_num++; 1292 i2400m->rx_num++;
1293 i2400m->rx_size_acc += skb->len; 1293 i2400m->rx_size_acc += skb_len;
1294 if (skb->len < i2400m->rx_size_min) 1294 if (skb_len < i2400m->rx_size_min)
1295 i2400m->rx_size_min = skb->len; 1295 i2400m->rx_size_min = skb_len;
1296 if (skb->len > i2400m->rx_size_max) 1296 if (skb_len > i2400m->rx_size_max)
1297 i2400m->rx_size_max = skb->len; 1297 i2400m->rx_size_max = skb_len;
1298 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 1298 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1299error_pl_descr_check: 1299error_pl_descr_check:
1300error_pl_descr_short: 1300error_pl_descr_short:
1301error_msg_hdr_check: 1301error_msg_hdr_check:
1302 d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n", 1302 d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
1303 i2400m, skb, skb_len, result); 1303 i2400m, skb, skb_len, result);
1304 return result; 1304 return result;
1305} 1305}
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index 8b809c2ead6c..fb6396dd115f 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -87,7 +87,7 @@ static const __le32 i2400m_ACK_BARKER[4] = {
87 * 87 *
88 * sdio_readl() doesn't work. 88 * sdio_readl() doesn't work.
89 */ 89 */
90ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms) 90static ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
91{ 91{
92 int ret, cnt, val; 92 int ret, cnt, val;
93 ssize_t rx_size; 93 ssize_t rx_size;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 174e3442d519..4de4410cd38e 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -279,6 +279,7 @@ source "drivers/net/wireless/libertas/Kconfig"
279source "drivers/net/wireless/orinoco/Kconfig" 279source "drivers/net/wireless/orinoco/Kconfig"
280source "drivers/net/wireless/p54/Kconfig" 280source "drivers/net/wireless/p54/Kconfig"
281source "drivers/net/wireless/rt2x00/Kconfig" 281source "drivers/net/wireless/rt2x00/Kconfig"
282source "drivers/net/wireless/wl1251/Kconfig"
282source "drivers/net/wireless/wl12xx/Kconfig" 283source "drivers/net/wireless/wl12xx/Kconfig"
283source "drivers/net/wireless/zd1211rw/Kconfig" 284source "drivers/net/wireless/zd1211rw/Kconfig"
284 285
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 5d4ce4d2b32b..06f8ca26c5c1 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -49,6 +49,8 @@ obj-$(CONFIG_ATH_COMMON) += ath/
49 49
50obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 50obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
51 51
52obj-$(CONFIG_WL1251) += wl1251/
52obj-$(CONFIG_WL12XX) += wl12xx/ 53obj-$(CONFIG_WL12XX) += wl12xx/
54obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
53 55
54obj-$(CONFIG_IWM) += iwmc3200wifi/ 56obj-$(CONFIG_IWM) += iwmc3200wifi/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 7d26506957d7..5a56502c4eb0 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -105,7 +105,7 @@ static struct pci_driver airo_driver = {
105 of statistics in the /proc filesystem */ 105 of statistics in the /proc filesystem */
106 106
107#define IGNLABEL(comment) NULL 107#define IGNLABEL(comment) NULL
108static char *statsLabels[] = { 108static const char *statsLabels[] = {
109 "RxOverrun", 109 "RxOverrun",
110 IGNLABEL("RxPlcpCrcErr"), 110 IGNLABEL("RxPlcpCrcErr"),
111 IGNLABEL("RxPlcpFormatErr"), 111 IGNLABEL("RxPlcpFormatErr"),
@@ -217,7 +217,6 @@ static char *statsLabels[] = {
217 (no spaces) list of rates (up to 8). */ 217 (no spaces) list of rates (up to 8). */
218 218
219static int rates[8]; 219static int rates[8];
220static int basic_rate;
221static char *ssids[3]; 220static char *ssids[3];
222 221
223static int io[4]; 222static int io[4];
@@ -250,7 +249,6 @@ MODULE_LICENSE("Dual BSD/GPL");
250MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350"); 249MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350");
251module_param_array(io, int, NULL, 0); 250module_param_array(io, int, NULL, 0);
252module_param_array(irq, int, NULL, 0); 251module_param_array(irq, int, NULL, 0);
253module_param(basic_rate, int, 0);
254module_param_array(rates, int, NULL, 0); 252module_param_array(rates, int, NULL, 0);
255module_param_array(ssids, charp, NULL, 0); 253module_param_array(ssids, charp, NULL, 0);
256module_param(auto_wep, int, 0); 254module_param(auto_wep, int, 0);
@@ -932,7 +930,7 @@ typedef struct aironet_ioctl {
932 unsigned char __user *data; // d-data 930 unsigned char __user *data; // d-data
933} aironet_ioctl; 931} aironet_ioctl;
934 932
935static char swversion[] = "2.1"; 933static const char swversion[] = "2.1";
936#endif /* CISCO_EXT */ 934#endif /* CISCO_EXT */
937 935
938#define NUM_MODULES 2 936#define NUM_MODULES 2
@@ -1374,7 +1372,7 @@ static int micsetup(struct airo_info *ai) {
1374 return SUCCESS; 1372 return SUCCESS;
1375} 1373}
1376 1374
1377static char micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02}; 1375static const u8 micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
1378 1376
1379/*=========================================================================== 1377/*===========================================================================
1380 * Description: Mic a packet 1378 * Description: Mic a packet
@@ -3883,15 +3881,6 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3883 ai->config.rates[i] = rates[i]; 3881 ai->config.rates[i] = rates[i];
3884 } 3882 }
3885 } 3883 }
3886 if ( basic_rate > 0 ) {
3887 for( i = 0; i < 8; i++ ) {
3888 if ( ai->config.rates[i] == basic_rate ||
3889 !ai->config.rates ) {
3890 ai->config.rates[i] = basic_rate | 0x80;
3891 break;
3892 }
3893 }
3894 }
3895 set_bit (FLAG_COMMIT, &ai->flags); 3884 set_bit (FLAG_COMMIT, &ai->flags);
3896 } 3885 }
3897 3886
@@ -5023,7 +5012,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
5023 airo_config_commit(dev, NULL, NULL, NULL); 5012 airo_config_commit(dev, NULL, NULL, NULL);
5024} 5013}
5025 5014
5026static char *get_rmode(__le16 mode) 5015static const char *get_rmode(__le16 mode)
5027{ 5016{
5028 switch(mode & RXMODE_MASK) { 5017 switch(mode & RXMODE_MASK) {
5029 case RXMODE_RFMON: return "rfmon"; 5018 case RXMODE_RFMON: return "rfmon";
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 91c5f73b5ba3..1476314afa8a 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1525,8 +1525,7 @@ static void at76_rx_tasklet(unsigned long param)
1525 1525
1526 if (priv->device_unplugged) { 1526 if (priv->device_unplugged) {
1527 at76_dbg(DBG_DEVSTART, "device unplugged"); 1527 at76_dbg(DBG_DEVSTART, "device unplugged");
1528 if (urb) 1528 at76_dbg(DBG_DEVSTART, "urb status %d", urb->status);
1529 at76_dbg(DBG_DEVSTART, "urb status %d", urb->status);
1530 return; 1529 return;
1531 } 1530 }
1532 1531
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 0a75be027afa..92c216263ee9 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,5 +25,6 @@ config ATH_DEBUG
25source "drivers/net/wireless/ath/ath5k/Kconfig" 25source "drivers/net/wireless/ath/ath5k/Kconfig"
26source "drivers/net/wireless/ath/ath9k/Kconfig" 26source "drivers/net/wireless/ath/ath9k/Kconfig"
27source "drivers/net/wireless/ath/ar9170/Kconfig" 27source "drivers/net/wireless/ath/ar9170/Kconfig"
28source "drivers/net/wireless/ath/carl9170/Kconfig"
28 29
29endif 30endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 8113a5042afa..6d711ec97ec2 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,11 +1,13 @@
1obj-$(CONFIG_ATH5K) += ath5k/ 1obj-$(CONFIG_ATH5K) += ath5k/
2obj-$(CONFIG_ATH9K_HW) += ath9k/ 2obj-$(CONFIG_ATH9K_HW) += ath9k/
3obj-$(CONFIG_AR9170_USB) += ar9170/ 3obj-$(CONFIG_AR9170_USB) += ar9170/
4obj-$(CONFIG_CARL9170) += carl9170/
4 5
5obj-$(CONFIG_ATH_COMMON) += ath.o 6obj-$(CONFIG_ATH_COMMON) += ath.o
6 7
7ath-objs := main.o \ 8ath-objs := main.o \
8 regd.o \ 9 regd.o \
9 hw.o 10 hw.o \
11 key.o
10 12
11ath-$(CONFIG_ATH_DEBUG) += debug.o 13ath-$(CONFIG_ATH_DEBUG) += debug.o
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index a93dc18a45c3..5dbb5361fd51 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -54,8 +54,6 @@ MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
54MODULE_LICENSE("GPL"); 54MODULE_LICENSE("GPL");
55MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless"); 55MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
56MODULE_FIRMWARE("ar9170.fw"); 56MODULE_FIRMWARE("ar9170.fw");
57MODULE_FIRMWARE("ar9170-1.fw");
58MODULE_FIRMWARE("ar9170-2.fw");
59 57
60enum ar9170_requirements { 58enum ar9170_requirements {
61 AR9170_REQ_FW1_ONLY = 1, 59 AR9170_REQ_FW1_ONLY = 1,
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index a706202fa67c..501050c0296f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -19,6 +19,7 @@
19 19
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/spinlock.h>
22#include <net/mac80211.h> 23#include <net/mac80211.h>
23 24
24/* 25/*
@@ -35,7 +36,6 @@ static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
35 36
36struct ath_ani { 37struct ath_ani {
37 bool caldone; 38 bool caldone;
38 int16_t noise_floor;
39 unsigned int longcal_timer; 39 unsigned int longcal_timer;
40 unsigned int shortcal_timer; 40 unsigned int shortcal_timer;
41 unsigned int resetcal_timer; 41 unsigned int resetcal_timer;
@@ -43,6 +43,13 @@ struct ath_ani {
43 struct timer_list timer; 43 struct timer_list timer;
44}; 44};
45 45
46struct ath_cycle_counters {
47 u32 cycles;
48 u32 rx_busy;
49 u32 rx_frame;
50 u32 tx_frame;
51};
52
46enum ath_device_state { 53enum ath_device_state {
47 ATH_HW_UNAVAILABLE, 54 ATH_HW_UNAVAILABLE,
48 ATH_HW_INITIALIZED, 55 ATH_HW_INITIALIZED,
@@ -71,20 +78,44 @@ struct ath_regulatory {
71 struct reg_dmn_pair_mapping *regpair; 78 struct reg_dmn_pair_mapping *regpair;
72}; 79};
73 80
81enum ath_crypt_caps {
82 ATH_CRYPT_CAP_CIPHER_AESCCM = BIT(0),
83 ATH_CRYPT_CAP_MIC_COMBINED = BIT(1),
84};
85
86struct ath_keyval {
87 u8 kv_type;
88 u8 kv_pad;
89 u16 kv_len;
90 u8 kv_val[16]; /* TK */
91 u8 kv_mic[8]; /* Michael MIC key */
92 u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
93 * supports both MIC keys in the same key cache entry;
94 * in that case, kv_mic is the RX key) */
95};
96
97enum ath_cipher {
98 ATH_CIPHER_WEP = 0,
99 ATH_CIPHER_AES_OCB = 1,
100 ATH_CIPHER_AES_CCM = 2,
101 ATH_CIPHER_CKIP = 3,
102 ATH_CIPHER_TKIP = 4,
103 ATH_CIPHER_CLR = 5,
104 ATH_CIPHER_MIC = 127
105};
106
74/** 107/**
75 * struct ath_ops - Register read/write operations 108 * struct ath_ops - Register read/write operations
76 * 109 *
77 * @read: Register read 110 * @read: Register read
78 * @write: Register write 111 * @write: Register write
79 * @enable_write_buffer: Enable multiple register writes 112 * @enable_write_buffer: Enable multiple register writes
80 * @disable_write_buffer: Disable multiple register writes 113 * @write_flush: flush buffered register writes and disable buffering
81 * @write_flush: Flush buffered register writes
82 */ 114 */
83struct ath_ops { 115struct ath_ops {
84 unsigned int (*read)(void *, u32 reg_offset); 116 unsigned int (*read)(void *, u32 reg_offset);
85 void (*write)(void *, u32 val, u32 reg_offset); 117 void (*write)(void *, u32 val, u32 reg_offset);
86 void (*enable_write_buffer)(void *); 118 void (*enable_write_buffer)(void *);
87 void (*disable_write_buffer)(void *);
88 void (*write_flush) (void *); 119 void (*write_flush) (void *);
89}; 120};
90 121
@@ -120,7 +151,13 @@ struct ath_common {
120 u32 keymax; 151 u32 keymax;
121 DECLARE_BITMAP(keymap, ATH_KEYMAX); 152 DECLARE_BITMAP(keymap, ATH_KEYMAX);
122 DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX); 153 DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
123 u8 splitmic; 154 enum ath_crypt_caps crypt_caps;
155
156 unsigned int clockrate;
157
158 spinlock_t cc_lock;
159 struct ath_cycle_counters cc_ani;
160 struct ath_cycle_counters cc_survey;
124 161
125 struct ath_regulatory regulatory; 162 struct ath_regulatory regulatory;
126 const struct ath_ops *ops; 163 const struct ath_ops *ops;
@@ -132,5 +169,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
132 gfp_t gfp_mask); 169 gfp_t gfp_mask);
133 170
134void ath_hw_setbssidmask(struct ath_common *common); 171void ath_hw_setbssidmask(struct ath_common *common);
172void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
173int ath_key_config(struct ath_common *common,
174 struct ieee80211_vif *vif,
175 struct ieee80211_sta *sta,
176 struct ieee80211_key_conf *key);
177bool ath_hw_keyreset(struct ath_common *common, u16 entry);
178void ath_hw_cycle_counters_update(struct ath_common *common);
179int32_t ath_hw_get_listen_time(struct ath_common *common);
135 180
136#endif /* ATH_H */ 181#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index e4a5f046bba4..f1419198a479 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -355,41 +355,28 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
355 355
356 356
357/** 357/**
358 * ath5k_hw_ani_get_listen_time() - Calculate time spent listening 358 * ath5k_hw_ani_get_listen_time() - Update counters and return listening time
359 * 359 *
360 * Return an approximation of the time spent "listening" in milliseconds (ms) 360 * Return an approximation of the time spent "listening" in milliseconds (ms)
361 * since the last call of this function by deducting the cycles spent 361 * since the last call of this function.
362 * transmitting and receiving from the total cycle count. 362 * Save a snapshot of the counter values for debugging/statistics.
363 * Save profile count values for debugging/statistics and because we might want
364 * to use them later.
365 *
366 * We assume no one else clears these registers!
367 */ 363 */
368static int 364static int
369ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as) 365ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
370{ 366{
367 struct ath_common *common = ath5k_hw_common(ah);
371 int listen; 368 int listen;
372 369
373 /* freeze */ 370 spin_lock_bh(&common->cc_lock);
374 ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC); 371
375 /* read */ 372 ath_hw_cycle_counters_update(common);
376 as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE); 373 memcpy(&as->last_cc, &common->cc_ani, sizeof(as->last_cc));
377 as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR); 374
378 as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX); 375 /* clears common->cc_ani */
379 as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX); 376 listen = ath_hw_get_listen_time(common);
380 /* clear */ 377
381 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX); 378 spin_unlock_bh(&common->cc_lock);
382 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX); 379
383 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
384 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
385 /* un-freeze */
386 ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
387
388 /* TODO: where does 44000 come from? (11g clock rate?) */
389 listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
390
391 if (as->pfc_cycles == 0 || listen < 0)
392 return 0;
393 return listen; 380 return listen;
394} 381}
395 382
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 55cf26d8522c..d0a664039c87 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -75,10 +75,7 @@ struct ath5k_ani_state {
75 unsigned int cck_errors; 75 unsigned int cck_errors;
76 76
77 /* debug/statistics only: numbers from last ANI calibration */ 77 /* debug/statistics only: numbers from last ANI calibration */
78 unsigned int pfc_tx; 78 struct ath_cycle_counters last_cc;
79 unsigned int pfc_rx;
80 unsigned int pfc_busy;
81 unsigned int pfc_cycles;
82 unsigned int last_listen; 79 unsigned int last_listen;
83 unsigned int last_ofdm_errors; 80 unsigned int last_ofdm_errors;
84 unsigned int last_cck_errors; 81 unsigned int last_cck_errors;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index f399c4dd8e69..4a367cdb3eb9 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -206,6 +206,8 @@
206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */ 206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
207#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */ 207#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
208 208
209#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
210
209#define AR5K_INIT_CARR_SENSE_EN 1 211#define AR5K_INIT_CARR_SENSE_EN 1
210 212
211/*Swap RX/TX Descriptor for big endian archs*/ 213/*Swap RX/TX Descriptor for big endian archs*/
@@ -256,8 +258,6 @@
256 (AR5K_INIT_PROG_IFS_TURBO) \ 258 (AR5K_INIT_PROG_IFS_TURBO) \
257) 259)
258 260
259/* token to use for aifs, cwmin, cwmax in MadWiFi */
260#define AR5K_TXQ_USEDEFAULT ((u32) -1)
261 261
262/* GENERIC CHIPSET DEFINITIONS */ 262/* GENERIC CHIPSET DEFINITIONS */
263 263
@@ -528,9 +528,9 @@ struct ath5k_txq_info {
528 enum ath5k_tx_queue tqi_type; 528 enum ath5k_tx_queue tqi_type;
529 enum ath5k_tx_queue_subtype tqi_subtype; 529 enum ath5k_tx_queue_subtype tqi_subtype;
530 u16 tqi_flags; /* Tx queue flags (see above) */ 530 u16 tqi_flags; /* Tx queue flags (see above) */
531 u32 tqi_aifs; /* Arbitrated Interframe Space */ 531 u8 tqi_aifs; /* Arbitrated Interframe Space */
532 s32 tqi_cw_min; /* Minimum Contention Window */ 532 u16 tqi_cw_min; /* Minimum Contention Window */
533 s32 tqi_cw_max; /* Maximum Contention Window */ 533 u16 tqi_cw_max; /* Maximum Contention Window */
534 u32 tqi_cbr_period; /* Constant bit rate period */ 534 u32 tqi_cbr_period; /* Constant bit rate period */
535 u32 tqi_cbr_overflow_limit; 535 u32 tqi_cbr_overflow_limit;
536 u32 tqi_burst_time; 536 u32 tqi_burst_time;
@@ -1028,8 +1028,6 @@ struct ath5k_hw {
1028 bool ah_turbo; 1028 bool ah_turbo;
1029 bool ah_calibration; 1029 bool ah_calibration;
1030 bool ah_single_chip; 1030 bool ah_single_chip;
1031 bool ah_aes_support;
1032 bool ah_combined_mic;
1033 1031
1034 enum ath5k_version ah_version; 1032 enum ath5k_version ah_version;
1035 enum ath5k_radio ah_radio; 1033 enum ath5k_radio ah_radio;
@@ -1043,10 +1041,6 @@ struct ath5k_hw {
1043#define ah_modes ah_capabilities.cap_mode 1041#define ah_modes ah_capabilities.cap_mode
1044#define ah_ee_version ah_capabilities.cap_eeprom.ee_version 1042#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
1045 1043
1046 u32 ah_atim_window;
1047 u32 ah_aifs;
1048 u32 ah_cw_min;
1049 u32 ah_cw_max;
1050 u32 ah_limit_tx_retries; 1044 u32 ah_limit_tx_retries;
1051 u8 ah_coverage_class; 1045 u8 ah_coverage_class;
1052 1046
@@ -1201,17 +1195,13 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1201void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); 1195void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
1202void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1196void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
1203void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); 1197void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1198bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
1204/* ACK bit rate */ 1199/* ACK bit rate */
1205void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high); 1200void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
1206/* Clock rate related functions */ 1201/* Clock rate related functions */
1207unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec); 1202unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1208unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock); 1203unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1209unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah); 1204void ath5k_hw_set_clockrate(struct ath5k_hw *ah);
1210/* Key table (WEP) functions */
1211int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1212int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
1213 const struct ieee80211_key_conf *key, const u8 *mac);
1214int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
1215 1205
1216/* Queue Control Unit, DFS Control Unit Functions */ 1206/* Queue Control Unit, DFS Control Unit Functions */
1217int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 1207int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index aabad4f13e2a..cd0b14a0a93a 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -118,9 +118,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
118 ah->ah_turbo = false; 118 ah->ah_turbo = false;
119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
120 ah->ah_imr = 0; 120 ah->ah_imr = 0;
121 ah->ah_atim_window = 0;
122 ah->ah_aifs = AR5K_TUNE_AIFS;
123 ah->ah_cw_min = AR5K_TUNE_CWMIN;
124 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 121 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
125 ah->ah_software_retry = false; 122 ah->ah_software_retry = false;
126 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT; 123 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
@@ -314,12 +311,16 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
314 } 311 }
315 312
316 /* Crypto settings */ 313 /* Crypto settings */
317 ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 && 314 common->keymax = (sc->ah->ah_version == AR5K_AR5210 ?
318 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 && 315 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
319 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)); 316
317 if (srev >= AR5K_SREV_AR5212_V4 &&
318 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 &&
319 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)))
320 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
320 321
321 if (srev >= AR5K_SREV_AR2414) { 322 if (srev >= AR5K_SREV_AR2414) {
322 ah->ah_combined_mic = true; 323 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
323 AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE, 324 AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE,
324 AR5K_MISC_MODE_COMBINED_MIC); 325 AR5K_MISC_MODE_COMBINED_MIC);
325 } 326 }
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 116ac66c6e3e..f1ae75d35d5d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -52,6 +52,7 @@
52#include <linux/ethtool.h> 52#include <linux/ethtool.h>
53#include <linux/uaccess.h> 53#include <linux/uaccess.h>
54#include <linux/slab.h> 54#include <linux/slab.h>
55#include <linux/etherdevice.h>
55 56
56#include <net/ieee80211_radiotap.h> 57#include <net/ieee80211_radiotap.h>
57 58
@@ -61,6 +62,7 @@
61#include "reg.h" 62#include "reg.h"
62#include "debug.h" 63#include "debug.h"
63#include "ani.h" 64#include "ani.h"
65#include "../debug.h"
64 66
65static int modparam_nohwcrypt; 67static int modparam_nohwcrypt;
66module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 68module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
@@ -70,11 +72,6 @@ static int modparam_all_channels;
70module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); 72module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
71MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); 73MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
72 74
73
74/******************\
75* Internal defines *
76\******************/
77
78/* Module info */ 75/* Module info */
79MODULE_AUTHOR("Jiri Slaby"); 76MODULE_AUTHOR("Jiri Slaby");
80MODULE_AUTHOR("Nick Kossifidis"); 77MODULE_AUTHOR("Nick Kossifidis");
@@ -83,6 +80,10 @@ MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
83MODULE_LICENSE("Dual BSD/GPL"); 80MODULE_LICENSE("Dual BSD/GPL");
84MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); 81MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
85 82
83static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
84static int ath5k_beacon_update(struct ieee80211_hw *hw,
85 struct ieee80211_vif *vif);
86static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
86 87
87/* Known PCI ids */ 88/* Known PCI ids */
88static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { 89static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
@@ -190,129 +191,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
190 /* XR missing */ 191 /* XR missing */
191}; 192};
192 193
193/*
194 * Prototypes - PCI stack related functions
195 */
196static int __devinit ath5k_pci_probe(struct pci_dev *pdev,
197 const struct pci_device_id *id);
198static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
199#ifdef CONFIG_PM_SLEEP
200static int ath5k_pci_suspend(struct device *dev);
201static int ath5k_pci_resume(struct device *dev);
202
203static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
204#define ATH5K_PM_OPS (&ath5k_pm_ops)
205#else
206#define ATH5K_PM_OPS NULL
207#endif /* CONFIG_PM_SLEEP */
208
209static struct pci_driver ath5k_pci_driver = {
210 .name = KBUILD_MODNAME,
211 .id_table = ath5k_pci_id_table,
212 .probe = ath5k_pci_probe,
213 .remove = __devexit_p(ath5k_pci_remove),
214 .driver.pm = ATH5K_PM_OPS,
215};
216
217
218
219/*
220 * Prototypes - MAC 802.11 stack related functions
221 */
222static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
223static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
224 struct ath5k_txq *txq);
225static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
226static int ath5k_start(struct ieee80211_hw *hw);
227static void ath5k_stop(struct ieee80211_hw *hw);
228static int ath5k_add_interface(struct ieee80211_hw *hw,
229 struct ieee80211_vif *vif);
230static void ath5k_remove_interface(struct ieee80211_hw *hw,
231 struct ieee80211_vif *vif);
232static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
234 struct netdev_hw_addr_list *mc_list);
235static void ath5k_configure_filter(struct ieee80211_hw *hw,
236 unsigned int changed_flags,
237 unsigned int *new_flags,
238 u64 multicast);
239static int ath5k_set_key(struct ieee80211_hw *hw,
240 enum set_key_cmd cmd,
241 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
242 struct ieee80211_key_conf *key);
243static int ath5k_get_stats(struct ieee80211_hw *hw,
244 struct ieee80211_low_level_stats *stats);
245static int ath5k_get_survey(struct ieee80211_hw *hw,
246 int idx, struct survey_info *survey);
247static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
248static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
249static void ath5k_reset_tsf(struct ieee80211_hw *hw);
250static int ath5k_beacon_update(struct ieee80211_hw *hw,
251 struct ieee80211_vif *vif);
252static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
253 struct ieee80211_vif *vif,
254 struct ieee80211_bss_conf *bss_conf,
255 u32 changes);
256static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
257static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
258static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
259 u8 coverage_class);
260
261static const struct ieee80211_ops ath5k_hw_ops = {
262 .tx = ath5k_tx,
263 .start = ath5k_start,
264 .stop = ath5k_stop,
265 .add_interface = ath5k_add_interface,
266 .remove_interface = ath5k_remove_interface,
267 .config = ath5k_config,
268 .prepare_multicast = ath5k_prepare_multicast,
269 .configure_filter = ath5k_configure_filter,
270 .set_key = ath5k_set_key,
271 .get_stats = ath5k_get_stats,
272 .get_survey = ath5k_get_survey,
273 .conf_tx = NULL,
274 .get_tsf = ath5k_get_tsf,
275 .set_tsf = ath5k_set_tsf,
276 .reset_tsf = ath5k_reset_tsf,
277 .bss_info_changed = ath5k_bss_info_changed,
278 .sw_scan_start = ath5k_sw_scan_start,
279 .sw_scan_complete = ath5k_sw_scan_complete,
280 .set_coverage_class = ath5k_set_coverage_class,
281};
282
283/*
284 * Prototypes - Internal functions
285 */
286/* Attach detach */
287static int ath5k_attach(struct pci_dev *pdev,
288 struct ieee80211_hw *hw);
289static void ath5k_detach(struct pci_dev *pdev,
290 struct ieee80211_hw *hw);
291/* Channel/mode setup */
292static inline short ath5k_ieee2mhz(short chan);
293static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
294 struct ieee80211_channel *channels,
295 unsigned int mode,
296 unsigned int max);
297static int ath5k_setup_bands(struct ieee80211_hw *hw);
298static int ath5k_chan_set(struct ath5k_softc *sc,
299 struct ieee80211_channel *chan);
300static void ath5k_setcurmode(struct ath5k_softc *sc,
301 unsigned int mode);
302static void ath5k_mode_setup(struct ath5k_softc *sc);
303
304/* Descriptor setup */
305static int ath5k_desc_alloc(struct ath5k_softc *sc,
306 struct pci_dev *pdev);
307static void ath5k_desc_free(struct ath5k_softc *sc,
308 struct pci_dev *pdev);
309/* Buffers setup */
310static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
311 struct ath5k_buf *bf);
312static int ath5k_txbuf_setup(struct ath5k_softc *sc,
313 struct ath5k_buf *bf,
314 struct ath5k_txq *txq, int padsize);
315
316static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc, 194static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
317 struct ath5k_buf *bf) 195 struct ath5k_buf *bf)
318{ 196{
@@ -345,35 +223,6 @@ static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
345} 223}
346 224
347 225
348/* Queues setup */
349static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
350 int qtype, int subtype);
351static int ath5k_beaconq_setup(struct ath5k_hw *ah);
352static int ath5k_beaconq_config(struct ath5k_softc *sc);
353static void ath5k_txq_drainq(struct ath5k_softc *sc,
354 struct ath5k_txq *txq);
355static void ath5k_txq_cleanup(struct ath5k_softc *sc);
356static void ath5k_txq_release(struct ath5k_softc *sc);
357/* Rx handling */
358static int ath5k_rx_start(struct ath5k_softc *sc);
359static void ath5k_rx_stop(struct ath5k_softc *sc);
360static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
361 struct sk_buff *skb,
362 struct ath5k_rx_status *rs);
363static void ath5k_tasklet_rx(unsigned long data);
364/* Tx handling */
365static void ath5k_tx_processq(struct ath5k_softc *sc,
366 struct ath5k_txq *txq);
367static void ath5k_tasklet_tx(unsigned long data);
368/* Beacon handling */
369static int ath5k_beacon_setup(struct ath5k_softc *sc,
370 struct ath5k_buf *bf);
371static void ath5k_beacon_send(struct ath5k_softc *sc);
372static void ath5k_beacon_config(struct ath5k_softc *sc);
373static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
374static void ath5k_tasklet_beacon(unsigned long data);
375static void ath5k_tasklet_ani(unsigned long data);
376
377static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 226static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
378{ 227{
379 u64 tsf = ath5k_hw_get_tsf64(ah); 228 u64 tsf = ath5k_hw_get_tsf64(ah);
@@ -384,50 +233,6 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
384 return (tsf & ~0x7fff) | rstamp; 233 return (tsf & ~0x7fff) | rstamp;
385} 234}
386 235
387/* Interrupt handling */
388static int ath5k_init(struct ath5k_softc *sc);
389static int ath5k_stop_locked(struct ath5k_softc *sc);
390static int ath5k_stop_hw(struct ath5k_softc *sc);
391static irqreturn_t ath5k_intr(int irq, void *dev_id);
392static void ath5k_reset_work(struct work_struct *work);
393
394static void ath5k_tasklet_calibrate(unsigned long data);
395
396/*
397 * Module init/exit functions
398 */
399static int __init
400init_ath5k_pci(void)
401{
402 int ret;
403
404 ath5k_debug_init();
405
406 ret = pci_register_driver(&ath5k_pci_driver);
407 if (ret) {
408 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
409 return ret;
410 }
411
412 return 0;
413}
414
415static void __exit
416exit_ath5k_pci(void)
417{
418 pci_unregister_driver(&ath5k_pci_driver);
419
420 ath5k_debug_finish();
421}
422
423module_init(init_ath5k_pci);
424module_exit(exit_ath5k_pci);
425
426
427/********************\
428* PCI Initialization *
429\********************/
430
431static const char * 236static const char *
432ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) 237ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
433{ 238{
@@ -466,299 +271,6 @@ static const struct ath_ops ath5k_common_ops = {
466 .write = ath5k_iowrite32, 271 .write = ath5k_iowrite32,
467}; 272};
468 273
469static int __devinit
470ath5k_pci_probe(struct pci_dev *pdev,
471 const struct pci_device_id *id)
472{
473 void __iomem *mem;
474 struct ath5k_softc *sc;
475 struct ath_common *common;
476 struct ieee80211_hw *hw;
477 int ret;
478 u8 csz;
479
480 /*
481 * L0s needs to be disabled on all ath5k cards.
482 *
483 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
484 * by default in the future in 2.6.36) this will also mean both L1 and
485 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
486 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
487 * though but cannot currently undue the effect of a blacklist, for
488 * details you can read pcie_aspm_sanity_check() and see how it adjusts
489 * the device link capability.
490 *
491 * It may be possible in the future to implement some PCI API to allow
492 * drivers to override blacklists for pre 1.1 PCIe but for now it is
493 * best to accept that both L0s and L1 will be disabled completely for
494 * distributions shipping with CONFIG_PCIEASPM rather than having this
495 * issue present. Motivation for adding this new API will be to help
496 * with power consumption for some of these devices.
497 */
498 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
499
500 ret = pci_enable_device(pdev);
501 if (ret) {
502 dev_err(&pdev->dev, "can't enable device\n");
503 goto err;
504 }
505
506 /* XXX 32-bit addressing only */
507 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
508 if (ret) {
509 dev_err(&pdev->dev, "32-bit DMA not available\n");
510 goto err_dis;
511 }
512
513 /*
514 * Cache line size is used to size and align various
515 * structures used to communicate with the hardware.
516 */
517 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
518 if (csz == 0) {
519 /*
520 * Linux 2.4.18 (at least) writes the cache line size
521 * register as a 16-bit wide register which is wrong.
522 * We must have this setup properly for rx buffer
523 * DMA to work so force a reasonable value here if it
524 * comes up zero.
525 */
526 csz = L1_CACHE_BYTES >> 2;
527 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
528 }
529 /*
530 * The default setting of latency timer yields poor results,
531 * set it to the value used by other systems. It may be worth
532 * tweaking this setting more.
533 */
534 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
535
536 /* Enable bus mastering */
537 pci_set_master(pdev);
538
539 /*
540 * Disable the RETRY_TIMEOUT register (0x41) to keep
541 * PCI Tx retries from interfering with C3 CPU state.
542 */
543 pci_write_config_byte(pdev, 0x41, 0);
544
545 ret = pci_request_region(pdev, 0, "ath5k");
546 if (ret) {
547 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
548 goto err_dis;
549 }
550
551 mem = pci_iomap(pdev, 0, 0);
552 if (!mem) {
553 dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
554 ret = -EIO;
555 goto err_reg;
556 }
557
558 /*
559 * Allocate hw (mac80211 main struct)
560 * and hw->priv (driver private data)
561 */
562 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
563 if (hw == NULL) {
564 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
565 ret = -ENOMEM;
566 goto err_map;
567 }
568
569 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
570
571 /* Initialize driver private data */
572 SET_IEEE80211_DEV(hw, &pdev->dev);
573 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
574 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
575 IEEE80211_HW_SIGNAL_DBM;
576
577 hw->wiphy->interface_modes =
578 BIT(NL80211_IFTYPE_AP) |
579 BIT(NL80211_IFTYPE_STATION) |
580 BIT(NL80211_IFTYPE_ADHOC) |
581 BIT(NL80211_IFTYPE_MESH_POINT);
582
583 hw->extra_tx_headroom = 2;
584 hw->channel_change_time = 5000;
585 sc = hw->priv;
586 sc->hw = hw;
587 sc->pdev = pdev;
588
589 ath5k_debug_init_device(sc);
590
591 /*
592 * Mark the device as detached to avoid processing
593 * interrupts until setup is complete.
594 */
595 __set_bit(ATH_STAT_INVALID, sc->status);
596
597 sc->iobase = mem; /* So we can unmap it on detach */
598 sc->opmode = NL80211_IFTYPE_STATION;
599 sc->bintval = 1000;
600 mutex_init(&sc->lock);
601 spin_lock_init(&sc->rxbuflock);
602 spin_lock_init(&sc->txbuflock);
603 spin_lock_init(&sc->block);
604
605 /* Set private data */
606 pci_set_drvdata(pdev, sc);
607
608 /* Setup interrupt handler */
609 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
610 if (ret) {
611 ATH5K_ERR(sc, "request_irq failed\n");
612 goto err_free;
613 }
614
615 /* If we passed the test, malloc an ath5k_hw struct */
616 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
617 if (!sc->ah) {
618 ret = -ENOMEM;
619 ATH5K_ERR(sc, "out of memory\n");
620 goto err_irq;
621 }
622
623 sc->ah->ah_sc = sc;
624 sc->ah->ah_iobase = sc->iobase;
625 common = ath5k_hw_common(sc->ah);
626 common->ops = &ath5k_common_ops;
627 common->ah = sc->ah;
628 common->hw = hw;
629 common->cachelsz = csz << 2; /* convert to bytes */
630
631 /* Initialize device */
632 ret = ath5k_hw_attach(sc);
633 if (ret) {
634 goto err_free_ah;
635 }
636
637 /* set up multi-rate retry capabilities */
638 if (sc->ah->ah_version == AR5K_AR5212) {
639 hw->max_rates = 4;
640 hw->max_rate_tries = 11;
641 }
642
643 /* Finish private driver data initialization */
644 ret = ath5k_attach(pdev, hw);
645 if (ret)
646 goto err_ah;
647
648 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
649 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
650 sc->ah->ah_mac_srev,
651 sc->ah->ah_phy_revision);
652
653 if (!sc->ah->ah_single_chip) {
654 /* Single chip radio (!RF5111) */
655 if (sc->ah->ah_radio_5ghz_revision &&
656 !sc->ah->ah_radio_2ghz_revision) {
657 /* No 5GHz support -> report 2GHz radio */
658 if (!test_bit(AR5K_MODE_11A,
659 sc->ah->ah_capabilities.cap_mode)) {
660 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
661 ath5k_chip_name(AR5K_VERSION_RAD,
662 sc->ah->ah_radio_5ghz_revision),
663 sc->ah->ah_radio_5ghz_revision);
664 /* No 2GHz support (5110 and some
665 * 5Ghz only cards) -> report 5Ghz radio */
666 } else if (!test_bit(AR5K_MODE_11B,
667 sc->ah->ah_capabilities.cap_mode)) {
668 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
669 ath5k_chip_name(AR5K_VERSION_RAD,
670 sc->ah->ah_radio_5ghz_revision),
671 sc->ah->ah_radio_5ghz_revision);
672 /* Multiband radio */
673 } else {
674 ATH5K_INFO(sc, "RF%s multiband radio found"
675 " (0x%x)\n",
676 ath5k_chip_name(AR5K_VERSION_RAD,
677 sc->ah->ah_radio_5ghz_revision),
678 sc->ah->ah_radio_5ghz_revision);
679 }
680 }
681 /* Multi chip radio (RF5111 - RF2111) ->
682 * report both 2GHz/5GHz radios */
683 else if (sc->ah->ah_radio_5ghz_revision &&
684 sc->ah->ah_radio_2ghz_revision){
685 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
686 ath5k_chip_name(AR5K_VERSION_RAD,
687 sc->ah->ah_radio_5ghz_revision),
688 sc->ah->ah_radio_5ghz_revision);
689 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
690 ath5k_chip_name(AR5K_VERSION_RAD,
691 sc->ah->ah_radio_2ghz_revision),
692 sc->ah->ah_radio_2ghz_revision);
693 }
694 }
695
696
697 /* ready to process interrupts */
698 __clear_bit(ATH_STAT_INVALID, sc->status);
699
700 return 0;
701err_ah:
702 ath5k_hw_detach(sc->ah);
703err_free_ah:
704 kfree(sc->ah);
705err_irq:
706 free_irq(pdev->irq, sc);
707err_free:
708 ieee80211_free_hw(hw);
709err_map:
710 pci_iounmap(pdev, mem);
711err_reg:
712 pci_release_region(pdev, 0);
713err_dis:
714 pci_disable_device(pdev);
715err:
716 return ret;
717}
718
719static void __devexit
720ath5k_pci_remove(struct pci_dev *pdev)
721{
722 struct ath5k_softc *sc = pci_get_drvdata(pdev);
723
724 ath5k_debug_finish_device(sc);
725 ath5k_detach(pdev, sc->hw);
726 ath5k_hw_detach(sc->ah);
727 kfree(sc->ah);
728 free_irq(pdev->irq, sc);
729 pci_iounmap(pdev, sc->iobase);
730 pci_release_region(pdev, 0);
731 pci_disable_device(pdev);
732 ieee80211_free_hw(sc->hw);
733}
734
735#ifdef CONFIG_PM_SLEEP
736static int ath5k_pci_suspend(struct device *dev)
737{
738 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
739
740 ath5k_led_off(sc);
741 return 0;
742}
743
744static int ath5k_pci_resume(struct device *dev)
745{
746 struct pci_dev *pdev = to_pci_dev(dev);
747 struct ath5k_softc *sc = pci_get_drvdata(pdev);
748
749 /*
750 * Suspend/Resume resets the PCI configuration space, so we have to
751 * re-disable the RETRY_TIMEOUT register (0x41) to keep
752 * PCI Tx retries from interfering with C3 CPU state
753 */
754 pci_write_config_byte(pdev, 0x41, 0);
755
756 ath5k_led_enable(sc);
757 return 0;
758}
759#endif /* CONFIG_PM_SLEEP */
760
761
762/***********************\ 274/***********************\
763* Driver Initialization * 275* Driver Initialization *
764\***********************/ 276\***********************/
@@ -772,170 +284,6 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
772 return ath_reg_notifier_apply(wiphy, request, regulatory); 284 return ath_reg_notifier_apply(wiphy, request, regulatory);
773} 285}
774 286
775static int
776ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
777{
778 struct ath5k_softc *sc = hw->priv;
779 struct ath5k_hw *ah = sc->ah;
780 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
781 u8 mac[ETH_ALEN] = {};
782 int ret;
783
784 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
785
786 /*
787 * Check if the MAC has multi-rate retry support.
788 * We do this by trying to setup a fake extended
789 * descriptor. MACs that don't have support will
790 * return false w/o doing anything. MACs that do
791 * support it will return true w/o doing anything.
792 */
793 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
794
795 if (ret < 0)
796 goto err;
797 if (ret > 0)
798 __set_bit(ATH_STAT_MRRETRY, sc->status);
799
800 /*
801 * Collect the channel list. The 802.11 layer
802 * is resposible for filtering this list based
803 * on settings like the phy mode and regulatory
804 * domain restrictions.
805 */
806 ret = ath5k_setup_bands(hw);
807 if (ret) {
808 ATH5K_ERR(sc, "can't get channels\n");
809 goto err;
810 }
811
812 /* NB: setup here so ath5k_rate_update is happy */
813 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
814 ath5k_setcurmode(sc, AR5K_MODE_11A);
815 else
816 ath5k_setcurmode(sc, AR5K_MODE_11B);
817
818 /*
819 * Allocate tx+rx descriptors and populate the lists.
820 */
821 ret = ath5k_desc_alloc(sc, pdev);
822 if (ret) {
823 ATH5K_ERR(sc, "can't allocate descriptors\n");
824 goto err;
825 }
826
827 /*
828 * Allocate hardware transmit queues: one queue for
829 * beacon frames and one data queue for each QoS
830 * priority. Note that hw functions handle resetting
831 * these queues at the needed time.
832 */
833 ret = ath5k_beaconq_setup(ah);
834 if (ret < 0) {
835 ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
836 goto err_desc;
837 }
838 sc->bhalq = ret;
839 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
840 if (IS_ERR(sc->cabq)) {
841 ATH5K_ERR(sc, "can't setup cab queue\n");
842 ret = PTR_ERR(sc->cabq);
843 goto err_bhal;
844 }
845
846 sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
847 if (IS_ERR(sc->txq)) {
848 ATH5K_ERR(sc, "can't setup xmit queue\n");
849 ret = PTR_ERR(sc->txq);
850 goto err_queues;
851 }
852
853 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
854 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
855 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
856 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
857 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
858
859 INIT_WORK(&sc->reset_work, ath5k_reset_work);
860
861 ret = ath5k_eeprom_read_mac(ah, mac);
862 if (ret) {
863 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
864 sc->pdev->device);
865 goto err_queues;
866 }
867
868 SET_IEEE80211_PERM_ADDR(hw, mac);
869 /* All MAC address bits matter for ACKs */
870 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
871 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
872
873 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
874 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
875 if (ret) {
876 ATH5K_ERR(sc, "can't initialize regulatory system\n");
877 goto err_queues;
878 }
879
880 ret = ieee80211_register_hw(hw);
881 if (ret) {
882 ATH5K_ERR(sc, "can't register ieee80211 hw\n");
883 goto err_queues;
884 }
885
886 if (!ath_is_world_regd(regulatory))
887 regulatory_hint(hw->wiphy, regulatory->alpha2);
888
889 ath5k_init_leds(sc);
890
891 ath5k_sysfs_register(sc);
892
893 return 0;
894err_queues:
895 ath5k_txq_release(sc);
896err_bhal:
897 ath5k_hw_release_tx_queue(ah, sc->bhalq);
898err_desc:
899 ath5k_desc_free(sc, pdev);
900err:
901 return ret;
902}
903
904static void
905ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
906{
907 struct ath5k_softc *sc = hw->priv;
908
909 /*
910 * NB: the order of these is important:
911 * o call the 802.11 layer before detaching ath5k_hw to
912 * ensure callbacks into the driver to delete global
913 * key cache entries can be handled
914 * o reclaim the tx queue data structures after calling
915 * the 802.11 layer as we'll get called back to reclaim
916 * node state and potentially want to use them
917 * o to cleanup the tx queues the hal is called, so detach
918 * it last
919 * XXX: ??? detach ath5k_hw ???
920 * Other than that, it's straightforward...
921 */
922 ieee80211_unregister_hw(hw);
923 ath5k_desc_free(sc, pdev);
924 ath5k_txq_release(sc);
925 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
926 ath5k_unregister_leds(sc);
927
928 ath5k_sysfs_unregister(sc);
929 /*
930 * NB: can't reclaim these until after ieee80211_ifdetach
931 * returns because we'll get called back to reclaim node
932 * state and potentially want to use them.
933 */
934}
935
936
937
938
939/********************\ 287/********************\
940* Channel/mode setup * 288* Channel/mode setup *
941\********************/ 289\********************/
@@ -1163,8 +511,101 @@ ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
1163 } 511 }
1164} 512}
1165 513
514struct ath_vif_iter_data {
515 const u8 *hw_macaddr;
516 u8 mask[ETH_ALEN];
517 u8 active_mac[ETH_ALEN]; /* first active MAC */
518 bool need_set_hw_addr;
519 bool found_active;
520 bool any_assoc;
521 enum nl80211_iftype opmode;
522};
523
524static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
525{
526 struct ath_vif_iter_data *iter_data = data;
527 int i;
528 struct ath5k_vif *avf = (void *)vif->drv_priv;
529
530 if (iter_data->hw_macaddr)
531 for (i = 0; i < ETH_ALEN; i++)
532 iter_data->mask[i] &=
533 ~(iter_data->hw_macaddr[i] ^ mac[i]);
534
535 if (!iter_data->found_active) {
536 iter_data->found_active = true;
537 memcpy(iter_data->active_mac, mac, ETH_ALEN);
538 }
539
540 if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
541 if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
542 iter_data->need_set_hw_addr = false;
543
544 if (!iter_data->any_assoc) {
545 if (avf->assoc)
546 iter_data->any_assoc = true;
547 }
548
549 /* Calculate combined mode - when APs are active, operate in AP mode.
550 * Otherwise use the mode of the new interface. This can currently
551 * only deal with combinations of APs and STAs. Only one ad-hoc
552 * interfaces is allowed above.
553 */
554 if (avf->opmode == NL80211_IFTYPE_AP)
555 iter_data->opmode = NL80211_IFTYPE_AP;
556 else
557 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
558 iter_data->opmode = avf->opmode;
559}
560
561static void ath_do_set_opmode(struct ath5k_softc *sc)
562{
563 struct ath5k_hw *ah = sc->ah;
564 ath5k_hw_set_opmode(ah, sc->opmode);
565 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
566 sc->opmode, ath_opmode_to_string(sc->opmode));
567}
568
569void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
570 struct ieee80211_vif *vif)
571{
572 struct ath_common *common = ath5k_hw_common(sc->ah);
573 struct ath_vif_iter_data iter_data;
574
575 /*
576 * Use the hardware MAC address as reference, the hardware uses it
577 * together with the BSSID mask when matching addresses.
578 */
579 iter_data.hw_macaddr = common->macaddr;
580 memset(&iter_data.mask, 0xff, ETH_ALEN);
581 iter_data.found_active = false;
582 iter_data.need_set_hw_addr = true;
583 iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
584
585 if (vif)
586 ath_vif_iter(&iter_data, vif->addr, vif);
587
588 /* Get list of all active MAC addresses */
589 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
590 &iter_data);
591 memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
592
593 sc->opmode = iter_data.opmode;
594 if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
595 /* Nothing active, default to station mode */
596 sc->opmode = NL80211_IFTYPE_STATION;
597
598 ath_do_set_opmode(sc);
599
600 if (iter_data.need_set_hw_addr && iter_data.found_active)
601 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
602
603 if (ath5k_hw_hasbssidmask(sc->ah))
604 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
605}
606
1166static void 607static void
1167ath5k_mode_setup(struct ath5k_softc *sc) 608ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
1168{ 609{
1169 struct ath5k_hw *ah = sc->ah; 610 struct ath5k_hw *ah = sc->ah;
1170 u32 rfilt; 611 u32 rfilt;
@@ -1172,15 +613,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1172 /* configure rx filter */ 613 /* configure rx filter */
1173 rfilt = sc->filter_flags; 614 rfilt = sc->filter_flags;
1174 ath5k_hw_set_rx_filter(ah, rfilt); 615 ath5k_hw_set_rx_filter(ah, rfilt);
1175
1176 if (ath5k_hw_hasbssidmask(ah))
1177 ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
1178
1179 /* configure operational mode */
1180 ath5k_hw_set_opmode(ah, sc->opmode);
1181
1182 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
1183 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 616 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
617
618 ath5k_update_bssid_mask_and_opmode(sc, vif);
1184} 619}
1185 620
1186static inline int 621static inline int
@@ -1352,13 +787,13 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1352 flags |= AR5K_TXDESC_RTSENA; 787 flags |= AR5K_TXDESC_RTSENA;
1353 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; 788 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
1354 duration = le16_to_cpu(ieee80211_rts_duration(sc->hw, 789 duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
1355 sc->vif, pktlen, info)); 790 info->control.vif, pktlen, info));
1356 } 791 }
1357 if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 792 if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1358 flags |= AR5K_TXDESC_CTSENA; 793 flags |= AR5K_TXDESC_CTSENA;
1359 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; 794 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
1360 duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw, 795 duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
1361 sc->vif, pktlen, info)); 796 info->control.vif, pktlen, info));
1362 } 797 }
1363 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 798 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1364 ieee80211_get_hdrlen_from_skb(skb), padsize, 799 ieee80211_get_hdrlen_from_skb(skb), padsize,
@@ -1391,6 +826,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1391 826
1392 spin_lock_bh(&txq->lock); 827 spin_lock_bh(&txq->lock);
1393 list_add_tail(&bf->list, &txq->q); 828 list_add_tail(&bf->list, &txq->q);
829 txq->txq_len++;
1394 if (txq->link == NULL) /* is this first packet? */ 830 if (txq->link == NULL) /* is this first packet? */
1395 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); 831 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
1396 else /* no, so only link it */ 832 else /* no, so only link it */
@@ -1459,10 +895,13 @@ ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
1459 list_add_tail(&bf->list, &sc->txbuf); 895 list_add_tail(&bf->list, &sc->txbuf);
1460 } 896 }
1461 897
1462 /* beacon buffer */ 898 /* beacon buffers */
1463 bf->desc = ds; 899 INIT_LIST_HEAD(&sc->bcbuf);
1464 bf->daddr = da; 900 for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
1465 sc->bbuf = bf; 901 bf->desc = ds;
902 bf->daddr = da;
903 list_add_tail(&bf->list, &sc->bcbuf);
904 }
1466 905
1467 return 0; 906 return 0;
1468err_free: 907err_free:
@@ -1477,11 +916,12 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
1477{ 916{
1478 struct ath5k_buf *bf; 917 struct ath5k_buf *bf;
1479 918
1480 ath5k_txbuf_free_skb(sc, sc->bbuf);
1481 list_for_each_entry(bf, &sc->txbuf, list) 919 list_for_each_entry(bf, &sc->txbuf, list)
1482 ath5k_txbuf_free_skb(sc, bf); 920 ath5k_txbuf_free_skb(sc, bf);
1483 list_for_each_entry(bf, &sc->rxbuf, list) 921 list_for_each_entry(bf, &sc->rxbuf, list)
1484 ath5k_rxbuf_free_skb(sc, bf); 922 ath5k_rxbuf_free_skb(sc, bf);
923 list_for_each_entry(bf, &sc->bcbuf, list)
924 ath5k_txbuf_free_skb(sc, bf);
1485 925
1486 /* Free memory associated with all descriptors */ 926 /* Free memory associated with all descriptors */
1487 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 927 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
@@ -1490,13 +930,9 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
1490 930
1491 kfree(sc->bufptr); 931 kfree(sc->bufptr);
1492 sc->bufptr = NULL; 932 sc->bufptr = NULL;
1493 sc->bbuf = NULL;
1494} 933}
1495 934
1496 935
1497
1498
1499
1500/**************\ 936/**************\
1501* Queues setup * 937* Queues setup *
1502\**************/ 938\**************/
@@ -1509,9 +945,11 @@ ath5k_txq_setup(struct ath5k_softc *sc,
1509 struct ath5k_txq *txq; 945 struct ath5k_txq *txq;
1510 struct ath5k_txq_info qi = { 946 struct ath5k_txq_info qi = {
1511 .tqi_subtype = subtype, 947 .tqi_subtype = subtype,
1512 .tqi_aifs = AR5K_TXQ_USEDEFAULT, 948 /* XXX: default values not correct for B and XR channels,
1513 .tqi_cw_min = AR5K_TXQ_USEDEFAULT, 949 * but who cares? */
1514 .tqi_cw_max = AR5K_TXQ_USEDEFAULT 950 .tqi_aifs = AR5K_TUNE_AIFS,
951 .tqi_cw_min = AR5K_TUNE_CWMIN,
952 .tqi_cw_max = AR5K_TUNE_CWMAX
1515 }; 953 };
1516 int qnum; 954 int qnum;
1517 955
@@ -1550,6 +988,9 @@ ath5k_txq_setup(struct ath5k_softc *sc,
1550 INIT_LIST_HEAD(&txq->q); 988 INIT_LIST_HEAD(&txq->q);
1551 spin_lock_init(&txq->lock); 989 spin_lock_init(&txq->lock);
1552 txq->setup = true; 990 txq->setup = true;
991 txq->txq_len = 0;
992 txq->txq_poll_mark = false;
993 txq->txq_stuck = 0;
1553 } 994 }
1554 return &sc->txqs[qnum]; 995 return &sc->txqs[qnum];
1555} 996}
@@ -1558,9 +999,11 @@ static int
1558ath5k_beaconq_setup(struct ath5k_hw *ah) 999ath5k_beaconq_setup(struct ath5k_hw *ah)
1559{ 1000{
1560 struct ath5k_txq_info qi = { 1001 struct ath5k_txq_info qi = {
1561 .tqi_aifs = AR5K_TXQ_USEDEFAULT, 1002 /* XXX: default values not correct for B and XR channels,
1562 .tqi_cw_min = AR5K_TXQ_USEDEFAULT, 1003 * but who cares? */
1563 .tqi_cw_max = AR5K_TXQ_USEDEFAULT, 1004 .tqi_aifs = AR5K_TUNE_AIFS,
1005 .tqi_cw_min = AR5K_TUNE_CWMIN,
1006 .tqi_cw_max = AR5K_TUNE_CWMAX,
1564 /* NB: for dynamic turbo, don't enable any other interrupts */ 1007 /* NB: for dynamic turbo, don't enable any other interrupts */
1565 .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE 1008 .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
1566 }; 1009 };
@@ -1594,7 +1037,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1594 */ 1037 */
1595 qi.tqi_aifs = 0; 1038 qi.tqi_aifs = 0;
1596 qi.tqi_cw_min = 0; 1039 qi.tqi_cw_min = 0;
1597 qi.tqi_cw_max = 2 * ah->ah_cw_min; 1040 qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
1598 } 1041 }
1599 1042
1600 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 1043 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
@@ -1644,9 +1087,11 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1644 spin_lock_bh(&sc->txbuflock); 1087 spin_lock_bh(&sc->txbuflock);
1645 list_move_tail(&bf->list, &sc->txbuf); 1088 list_move_tail(&bf->list, &sc->txbuf);
1646 sc->txbuf_len++; 1089 sc->txbuf_len++;
1090 txq->txq_len--;
1647 spin_unlock_bh(&sc->txbuflock); 1091 spin_unlock_bh(&sc->txbuflock);
1648 } 1092 }
1649 txq->link = NULL; 1093 txq->link = NULL;
1094 txq->txq_poll_mark = false;
1650 spin_unlock_bh(&txq->lock); 1095 spin_unlock_bh(&txq->lock);
1651} 1096}
1652 1097
@@ -1696,8 +1141,6 @@ ath5k_txq_release(struct ath5k_softc *sc)
1696} 1141}
1697 1142
1698 1143
1699
1700
1701/*************\ 1144/*************\
1702* RX Handling * 1145* RX Handling *
1703\*************/ 1146\*************/
@@ -1732,7 +1175,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
1732 spin_unlock_bh(&sc->rxbuflock); 1175 spin_unlock_bh(&sc->rxbuflock);
1733 1176
1734 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ 1177 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
1735 ath5k_mode_setup(sc); /* set filters, etc. */ 1178 ath5k_mode_setup(sc, NULL); /* set filters, etc. */
1736 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 1179 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
1737 1180
1738 return 0; 1181 return 0;
@@ -1840,6 +1283,15 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1840 */ 1283 */
1841 if (hw_tu >= sc->nexttbtt) 1284 if (hw_tu >= sc->nexttbtt)
1842 ath5k_beacon_update_timers(sc, bc_tstamp); 1285 ath5k_beacon_update_timers(sc, bc_tstamp);
1286
1287 /* Check if the beacon timers are still correct, because a TSF
1288 * update might have created a window between them - for a
1289 * longer description see the comment of this function: */
1290 if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
1291 ath5k_beacon_update_timers(sc, bc_tstamp);
1292 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
1293 "fixed beacon timers after beacon receive\n");
1294 }
1843 } 1295 }
1844} 1296}
1845 1297
@@ -2006,6 +1458,7 @@ static bool
2006ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs) 1458ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
2007{ 1459{
2008 sc->stats.rx_all_count++; 1460 sc->stats.rx_all_count++;
1461 sc->stats.rx_bytes_count += rs->rs_datalen;
2009 1462
2010 if (unlikely(rs->rs_status)) { 1463 if (unlikely(rs->rs_status)) {
2011 if (rs->rs_status & AR5K_RXERR_CRC) 1464 if (rs->rs_status & AR5K_RXERR_CRC)
@@ -2121,6 +1574,118 @@ unlock:
2121* TX Handling * 1574* TX Handling *
2122\*************/ 1575\*************/
2123 1576
1577static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1578 struct ath5k_txq *txq)
1579{
1580 struct ath5k_softc *sc = hw->priv;
1581 struct ath5k_buf *bf;
1582 unsigned long flags;
1583 int padsize;
1584
1585 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
1586
1587 /*
1588 * The hardware expects the header padded to 4 byte boundaries.
1589 * If this is not the case, we add the padding after the header.
1590 */
1591 padsize = ath5k_add_padding(skb);
1592 if (padsize < 0) {
1593 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
1594 " headroom to pad");
1595 goto drop_packet;
1596 }
1597
1598 if (txq->txq_len >= ATH5K_TXQ_LEN_MAX)
1599 ieee80211_stop_queue(hw, txq->qnum);
1600
1601 spin_lock_irqsave(&sc->txbuflock, flags);
1602 if (list_empty(&sc->txbuf)) {
1603 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
1604 spin_unlock_irqrestore(&sc->txbuflock, flags);
1605 ieee80211_stop_queues(hw);
1606 goto drop_packet;
1607 }
1608 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
1609 list_del(&bf->list);
1610 sc->txbuf_len--;
1611 if (list_empty(&sc->txbuf))
1612 ieee80211_stop_queues(hw);
1613 spin_unlock_irqrestore(&sc->txbuflock, flags);
1614
1615 bf->skb = skb;
1616
1617 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
1618 bf->skb = NULL;
1619 spin_lock_irqsave(&sc->txbuflock, flags);
1620 list_add_tail(&bf->list, &sc->txbuf);
1621 sc->txbuf_len++;
1622 spin_unlock_irqrestore(&sc->txbuflock, flags);
1623 goto drop_packet;
1624 }
1625 return NETDEV_TX_OK;
1626
1627drop_packet:
1628 dev_kfree_skb_any(skb);
1629 return NETDEV_TX_OK;
1630}
1631
1632static void
1633ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1634 struct ath5k_tx_status *ts)
1635{
1636 struct ieee80211_tx_info *info;
1637 int i;
1638
1639 sc->stats.tx_all_count++;
1640 sc->stats.tx_bytes_count += skb->len;
1641 info = IEEE80211_SKB_CB(skb);
1642
1643 ieee80211_tx_info_clear_status(info);
1644 for (i = 0; i < 4; i++) {
1645 struct ieee80211_tx_rate *r =
1646 &info->status.rates[i];
1647
1648 if (ts->ts_rate[i]) {
1649 r->idx = ath5k_hw_to_driver_rix(sc, ts->ts_rate[i]);
1650 r->count = ts->ts_retry[i];
1651 } else {
1652 r->idx = -1;
1653 r->count = 0;
1654 }
1655 }
1656
1657 /* count the successful attempt as well */
1658 info->status.rates[ts->ts_final_idx].count++;
1659
1660 if (unlikely(ts->ts_status)) {
1661 sc->stats.ack_fail++;
1662 if (ts->ts_status & AR5K_TXERR_FILT) {
1663 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1664 sc->stats.txerr_filt++;
1665 }
1666 if (ts->ts_status & AR5K_TXERR_XRETRY)
1667 sc->stats.txerr_retry++;
1668 if (ts->ts_status & AR5K_TXERR_FIFO)
1669 sc->stats.txerr_fifo++;
1670 } else {
1671 info->flags |= IEEE80211_TX_STAT_ACK;
1672 info->status.ack_signal = ts->ts_rssi;
1673 }
1674
1675 /*
1676 * Remove MAC header padding before giving the frame
1677 * back to mac80211.
1678 */
1679 ath5k_remove_padding(skb);
1680
1681 if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
1682 sc->stats.antenna_tx[ts->ts_antenna]++;
1683 else
1684 sc->stats.antenna_tx[0]++; /* invalid */
1685
1686 ieee80211_tx_status(sc->hw, skb);
1687}
1688
2124static void 1689static void
2125ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) 1690ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2126{ 1691{
@@ -2128,96 +1693,51 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2128 struct ath5k_buf *bf, *bf0; 1693 struct ath5k_buf *bf, *bf0;
2129 struct ath5k_desc *ds; 1694 struct ath5k_desc *ds;
2130 struct sk_buff *skb; 1695 struct sk_buff *skb;
2131 struct ieee80211_tx_info *info; 1696 int ret;
2132 int i, ret;
2133 1697
2134 spin_lock(&txq->lock); 1698 spin_lock(&txq->lock);
2135 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1699 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
2136 ds = bf->desc;
2137 1700
2138 /* 1701 txq->txq_poll_mark = false;
2139 * It's possible that the hardware can say the buffer is
2140 * completed when it hasn't yet loaded the ds_link from
2141 * host memory and moved on. If there are more TX
2142 * descriptors in the queue, wait for TXDP to change
2143 * before processing this one.
2144 */
2145 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
2146 !list_is_last(&bf->list, &txq->q))
2147 break;
2148 1702
2149 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); 1703 /* skb might already have been processed last time. */
2150 if (unlikely(ret == -EINPROGRESS)) 1704 if (bf->skb != NULL) {
2151 break; 1705 ds = bf->desc;
2152 else if (unlikely(ret)) {
2153 ATH5K_ERR(sc, "error %d while processing queue %u\n",
2154 ret, txq->qnum);
2155 break;
2156 }
2157
2158 sc->stats.tx_all_count++;
2159 skb = bf->skb;
2160 info = IEEE80211_SKB_CB(skb);
2161 bf->skb = NULL;
2162 1706
2163 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, 1707 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
2164 PCI_DMA_TODEVICE); 1708 if (unlikely(ret == -EINPROGRESS))
2165 1709 break;
2166 ieee80211_tx_info_clear_status(info); 1710 else if (unlikely(ret)) {
2167 for (i = 0; i < 4; i++) { 1711 ATH5K_ERR(sc,
2168 struct ieee80211_tx_rate *r = 1712 "error %d while processing "
2169 &info->status.rates[i]; 1713 "queue %u\n", ret, txq->qnum);
2170 1714 break;
2171 if (ts.ts_rate[i]) {
2172 r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]);
2173 r->count = ts.ts_retry[i];
2174 } else {
2175 r->idx = -1;
2176 r->count = 0;
2177 } 1715 }
2178 }
2179
2180 /* count the successful attempt as well */
2181 info->status.rates[ts.ts_final_idx].count++;
2182 1716
2183 if (unlikely(ts.ts_status)) { 1717 skb = bf->skb;
2184 sc->stats.ack_fail++; 1718 bf->skb = NULL;
2185 if (ts.ts_status & AR5K_TXERR_FILT) { 1719 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
2186 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1720 PCI_DMA_TODEVICE);
2187 sc->stats.txerr_filt++; 1721 ath5k_tx_frame_completed(sc, skb, &ts);
2188 }
2189 if (ts.ts_status & AR5K_TXERR_XRETRY)
2190 sc->stats.txerr_retry++;
2191 if (ts.ts_status & AR5K_TXERR_FIFO)
2192 sc->stats.txerr_fifo++;
2193 } else {
2194 info->flags |= IEEE80211_TX_STAT_ACK;
2195 info->status.ack_signal = ts.ts_rssi;
2196 } 1722 }
2197 1723
2198 /* 1724 /*
2199 * Remove MAC header padding before giving the frame 1725 * It's possible that the hardware can say the buffer is
2200 * back to mac80211. 1726 * completed when it hasn't yet loaded the ds_link from
1727 * host memory and moved on.
1728 * Always keep the last descriptor to avoid HW races...
2201 */ 1729 */
2202 ath5k_remove_padding(skb); 1730 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
2203 1731 spin_lock(&sc->txbuflock);
2204 if (ts.ts_antenna > 0 && ts.ts_antenna < 5) 1732 list_move_tail(&bf->list, &sc->txbuf);
2205 sc->stats.antenna_tx[ts.ts_antenna]++; 1733 sc->txbuf_len++;
2206 else 1734 txq->txq_len--;
2207 sc->stats.antenna_tx[0]++; /* invalid */ 1735 spin_unlock(&sc->txbuflock);
2208 1736 }
2209 ieee80211_tx_status(sc->hw, skb);
2210
2211 spin_lock(&sc->txbuflock);
2212 list_move_tail(&bf->list, &sc->txbuf);
2213 sc->txbuf_len++;
2214 spin_unlock(&sc->txbuflock);
2215 } 1737 }
2216 if (likely(list_empty(&txq->q)))
2217 txq->link = NULL;
2218 spin_unlock(&txq->lock); 1738 spin_unlock(&txq->lock);
2219 if (sc->txbuf_len > ATH_TXBUF / 5) 1739 if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
2220 ieee80211_wake_queues(sc->hw); 1740 ieee80211_wake_queue(sc->hw, txq->qnum);
2221} 1741}
2222 1742
2223static void 1743static void
@@ -2313,6 +1833,44 @@ err_unmap:
2313} 1833}
2314 1834
2315/* 1835/*
1836 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
1837 * this is called only once at config_bss time, for AP we do it every
1838 * SWBA interrupt so that the TIM will reflect buffered frames.
1839 *
1840 * Called with the beacon lock.
1841 */
1842static int
1843ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1844{
1845 int ret;
1846 struct ath5k_softc *sc = hw->priv;
1847 struct ath5k_vif *avf = (void *)vif->drv_priv;
1848 struct sk_buff *skb;
1849
1850 if (WARN_ON(!vif)) {
1851 ret = -EINVAL;
1852 goto out;
1853 }
1854
1855 skb = ieee80211_beacon_get(hw, vif);
1856
1857 if (!skb) {
1858 ret = -ENOMEM;
1859 goto out;
1860 }
1861
1862 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
1863
1864 ath5k_txbuf_free_skb(sc, avf->bbuf);
1865 avf->bbuf->skb = skb;
1866 ret = ath5k_beacon_setup(sc, avf->bbuf);
1867 if (ret)
1868 avf->bbuf->skb = NULL;
1869out:
1870 return ret;
1871}
1872
1873/*
2316 * Transmit a beacon frame at SWBA. Dynamic updates to the 1874 * Transmit a beacon frame at SWBA. Dynamic updates to the
2317 * frame contents are done as needed and the slot time is 1875 * frame contents are done as needed and the slot time is
2318 * also adjusted based on current state. 1876 * also adjusted based on current state.
@@ -2323,16 +1881,14 @@ err_unmap:
2323static void 1881static void
2324ath5k_beacon_send(struct ath5k_softc *sc) 1882ath5k_beacon_send(struct ath5k_softc *sc)
2325{ 1883{
2326 struct ath5k_buf *bf = sc->bbuf;
2327 struct ath5k_hw *ah = sc->ah; 1884 struct ath5k_hw *ah = sc->ah;
1885 struct ieee80211_vif *vif;
1886 struct ath5k_vif *avf;
1887 struct ath5k_buf *bf;
2328 struct sk_buff *skb; 1888 struct sk_buff *skb;
2329 1889
2330 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1890 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
2331 1891
2332 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION)) {
2333 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
2334 return;
2335 }
2336 /* 1892 /*
2337 * Check if the previous beacon has gone out. If 1893 * Check if the previous beacon has gone out. If
2338 * not, don't don't try to post another: skip this 1894 * not, don't don't try to post another: skip this
@@ -2361,6 +1917,28 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2361 sc->bmisscount = 0; 1917 sc->bmisscount = 0;
2362 } 1918 }
2363 1919
1920 if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
1921 u64 tsf = ath5k_hw_get_tsf64(ah);
1922 u32 tsftu = TSF_TO_TU(tsf);
1923 int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
1924 vif = sc->bslot[(slot + 1) % ATH_BCBUF];
1925 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
1926 "tsf %llx tsftu %x intval %u slot %u vif %p\n",
1927 (unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
1928 } else /* only one interface */
1929 vif = sc->bslot[0];
1930
1931 if (!vif)
1932 return;
1933
1934 avf = (void *)vif->drv_priv;
1935 bf = avf->bbuf;
1936 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
1937 sc->opmode == NL80211_IFTYPE_MONITOR)) {
1938 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
1939 return;
1940 }
1941
2364 /* 1942 /*
2365 * Stop any current dma and put the new frame on the queue. 1943 * Stop any current dma and put the new frame on the queue.
2366 * This should never fail since we check above that no frames 1944 * This should never fail since we check above that no frames
@@ -2373,23 +1951,22 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2373 1951
2374 /* refresh the beacon for AP mode */ 1952 /* refresh the beacon for AP mode */
2375 if (sc->opmode == NL80211_IFTYPE_AP) 1953 if (sc->opmode == NL80211_IFTYPE_AP)
2376 ath5k_beacon_update(sc->hw, sc->vif); 1954 ath5k_beacon_update(sc->hw, vif);
2377 1955
2378 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); 1956 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
2379 ath5k_hw_start_tx_dma(ah, sc->bhalq); 1957 ath5k_hw_start_tx_dma(ah, sc->bhalq);
2380 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", 1958 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
2381 sc->bhalq, (unsigned long long)bf->daddr, bf->desc); 1959 sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
2382 1960
2383 skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); 1961 skb = ieee80211_get_buffered_bc(sc->hw, vif);
2384 while (skb) { 1962 while (skb) {
2385 ath5k_tx_queue(sc->hw, skb, sc->cabq); 1963 ath5k_tx_queue(sc->hw, skb, sc->cabq);
2386 skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); 1964 skb = ieee80211_get_buffered_bc(sc->hw, vif);
2387 } 1965 }
2388 1966
2389 sc->bsent++; 1967 sc->bsent++;
2390} 1968}
2391 1969
2392
2393/** 1970/**
2394 * ath5k_beacon_update_timers - update beacon timers 1971 * ath5k_beacon_update_timers - update beacon timers
2395 * 1972 *
@@ -2414,6 +1991,12 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2414 u64 hw_tsf; 1991 u64 hw_tsf;
2415 1992
2416 intval = sc->bintval & AR5K_BEACON_PERIOD; 1993 intval = sc->bintval & AR5K_BEACON_PERIOD;
1994 if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
1995 intval /= ATH_BCBUF; /* staggered multi-bss beacons */
1996 if (intval < 15)
1997 ATH5K_WARN(sc, "intval %u is too low, min 15\n",
1998 intval);
1999 }
2417 if (WARN_ON(!intval)) 2000 if (WARN_ON(!intval))
2418 return; 2001 return;
2419 2002
@@ -2424,8 +2007,11 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2424 hw_tsf = ath5k_hw_get_tsf64(ah); 2007 hw_tsf = ath5k_hw_get_tsf64(ah);
2425 hw_tu = TSF_TO_TU(hw_tsf); 2008 hw_tu = TSF_TO_TU(hw_tsf);
2426 2009
2427#define FUDGE 3 2010#define FUDGE AR5K_TUNE_SW_BEACON_RESP + 3
2428 /* we use FUDGE to make sure the next TBTT is ahead of the current TU */ 2011 /* We use FUDGE to make sure the next TBTT is ahead of the current TU.
2012 * Since we later substract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
2013 * configuration we need to make sure it is bigger than that. */
2014
2429 if (bc_tsf == -1) { 2015 if (bc_tsf == -1) {
2430 /* 2016 /*
2431 * no beacons received, called internally. 2017 * no beacons received, called internally.
@@ -2491,7 +2077,6 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2491 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); 2077 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
2492} 2078}
2493 2079
2494
2495/** 2080/**
2496 * ath5k_beacon_config - Configure the beacon queues and interrupts 2081 * ath5k_beacon_config - Configure the beacon queues and interrupts
2497 * 2082 *
@@ -2570,155 +2155,6 @@ static void ath5k_tasklet_beacon(unsigned long data)
2570* Interrupt handling * 2155* Interrupt handling *
2571\********************/ 2156\********************/
2572 2157
2573static int
2574ath5k_init(struct ath5k_softc *sc)
2575{
2576 struct ath5k_hw *ah = sc->ah;
2577 int ret, i;
2578
2579 mutex_lock(&sc->lock);
2580
2581 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2582
2583 /*
2584 * Stop anything previously setup. This is safe
2585 * no matter this is the first time through or not.
2586 */
2587 ath5k_stop_locked(sc);
2588
2589 /*
2590 * The basic interface to setting the hardware in a good
2591 * state is ``reset''. On return the hardware is known to
2592 * be powered up and with interrupts disabled. This must
2593 * be followed by initialization of the appropriate bits
2594 * and then setup of the interrupt mask.
2595 */
2596 sc->curchan = sc->hw->conf.channel;
2597 sc->curband = &sc->sbands[sc->curchan->band];
2598 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2599 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2600 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2601
2602 ret = ath5k_reset(sc, NULL);
2603 if (ret)
2604 goto done;
2605
2606 ath5k_rfkill_hw_start(ah);
2607
2608 /*
2609 * Reset the key cache since some parts do not reset the
2610 * contents on initial power up or resume from suspend.
2611 */
2612 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2613 ath5k_hw_reset_key(ah, i);
2614
2615 ath5k_hw_set_ack_bitrate_high(ah, true);
2616 ret = 0;
2617done:
2618 mmiowb();
2619 mutex_unlock(&sc->lock);
2620 return ret;
2621}
2622
2623static int
2624ath5k_stop_locked(struct ath5k_softc *sc)
2625{
2626 struct ath5k_hw *ah = sc->ah;
2627
2628 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2629 test_bit(ATH_STAT_INVALID, sc->status));
2630
2631 /*
2632 * Shutdown the hardware and driver:
2633 * stop output from above
2634 * disable interrupts
2635 * turn off timers
2636 * turn off the radio
2637 * clear transmit machinery
2638 * clear receive machinery
2639 * drain and release tx queues
2640 * reclaim beacon resources
2641 * power down hardware
2642 *
2643 * Note that some of this work is not possible if the
2644 * hardware is gone (invalid).
2645 */
2646 ieee80211_stop_queues(sc->hw);
2647
2648 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2649 ath5k_led_off(sc);
2650 ath5k_hw_set_imr(ah, 0);
2651 synchronize_irq(sc->pdev->irq);
2652 }
2653 ath5k_txq_cleanup(sc);
2654 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2655 ath5k_rx_stop(sc);
2656 ath5k_hw_phy_disable(ah);
2657 }
2658
2659 return 0;
2660}
2661
2662static void stop_tasklets(struct ath5k_softc *sc)
2663{
2664 tasklet_kill(&sc->rxtq);
2665 tasklet_kill(&sc->txtq);
2666 tasklet_kill(&sc->calib);
2667 tasklet_kill(&sc->beacontq);
2668 tasklet_kill(&sc->ani_tasklet);
2669}
2670
2671/*
2672 * Stop the device, grabbing the top-level lock to protect
2673 * against concurrent entry through ath5k_init (which can happen
2674 * if another thread does a system call and the thread doing the
2675 * stop is preempted).
2676 */
2677static int
2678ath5k_stop_hw(struct ath5k_softc *sc)
2679{
2680 int ret;
2681
2682 mutex_lock(&sc->lock);
2683 ret = ath5k_stop_locked(sc);
2684 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2685 /*
2686 * Don't set the card in full sleep mode!
2687 *
2688 * a) When the device is in this state it must be carefully
2689 * woken up or references to registers in the PCI clock
2690 * domain may freeze the bus (and system). This varies
2691 * by chip and is mostly an issue with newer parts
2692 * (madwifi sources mentioned srev >= 0x78) that go to
2693 * sleep more quickly.
2694 *
2695 * b) On older chips full sleep results a weird behaviour
2696 * during wakeup. I tested various cards with srev < 0x78
2697 * and they don't wake up after module reload, a second
2698 * module reload is needed to bring the card up again.
2699 *
2700 * Until we figure out what's going on don't enable
2701 * full chip reset on any chip (this is what Legacy HAL
2702 * and Sam's HAL do anyway). Instead Perform a full reset
2703 * on the device (same as initial state after attach) and
2704 * leave it idle (keep MAC/BB on warm reset) */
2705 ret = ath5k_hw_on_hold(sc->ah);
2706
2707 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2708 "putting device to sleep\n");
2709 }
2710 ath5k_txbuf_free_skb(sc, sc->bbuf);
2711
2712 mmiowb();
2713 mutex_unlock(&sc->lock);
2714
2715 stop_tasklets(sc);
2716
2717 ath5k_rfkill_hw_stop(sc->ah);
2718
2719 return ret;
2720}
2721
2722static void 2158static void
2723ath5k_intr_calibration_poll(struct ath5k_hw *ah) 2159ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2724{ 2160{
@@ -2855,14 +2291,13 @@ ath5k_tasklet_calibrate(unsigned long data)
2855 sc->curchan->center_freq)); 2291 sc->curchan->center_freq));
2856 2292
2857 /* Noise floor calibration interrupts rx/tx path while I/Q calibration 2293 /* Noise floor calibration interrupts rx/tx path while I/Q calibration
2858 * doesn't. We stop the queues so that calibration doesn't interfere 2294 * doesn't.
2859 * with TX and don't run it as often */ 2295 * TODO: We should stop TX here, so that it doesn't interfere.
2296 * Note that stopping the queues is not enough to stop TX! */
2860 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { 2297 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
2861 ah->ah_cal_next_nf = jiffies + 2298 ah->ah_cal_next_nf = jiffies +
2862 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF); 2299 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
2863 ieee80211_stop_queues(sc->hw);
2864 ath5k_hw_update_noise_floor(ah); 2300 ath5k_hw_update_noise_floor(ah);
2865 ieee80211_wake_queues(sc->hw);
2866 } 2301 }
2867 2302
2868 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; 2303 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
@@ -2881,68 +2316,208 @@ ath5k_tasklet_ani(unsigned long data)
2881} 2316}
2882 2317
2883 2318
2884/********************\ 2319static void
2885* Mac80211 functions * 2320ath5k_tx_complete_poll_work(struct work_struct *work)
2886\********************/ 2321{
2322 struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
2323 tx_complete_work.work);
2324 struct ath5k_txq *txq;
2325 int i;
2326 bool needreset = false;
2327
2328 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
2329 if (sc->txqs[i].setup) {
2330 txq = &sc->txqs[i];
2331 spin_lock_bh(&txq->lock);
2332 if (txq->txq_len > 1) {
2333 if (txq->txq_poll_mark) {
2334 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
2335 "TX queue stuck %d\n",
2336 txq->qnum);
2337 needreset = true;
2338 txq->txq_stuck++;
2339 spin_unlock_bh(&txq->lock);
2340 break;
2341 } else {
2342 txq->txq_poll_mark = true;
2343 }
2344 }
2345 spin_unlock_bh(&txq->lock);
2346 }
2347 }
2348
2349 if (needreset) {
2350 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2351 "TX queues stuck, resetting\n");
2352 ath5k_reset(sc, sc->curchan);
2353 }
2354
2355 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2356 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2357}
2358
2359
2360/*************************\
2361* Initialization routines *
2362\*************************/
2887 2363
2888static int 2364static int
2889ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2365ath5k_stop_locked(struct ath5k_softc *sc)
2890{ 2366{
2891 struct ath5k_softc *sc = hw->priv; 2367 struct ath5k_hw *ah = sc->ah;
2368
2369 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2370 test_bit(ATH_STAT_INVALID, sc->status));
2371
2372 /*
2373 * Shutdown the hardware and driver:
2374 * stop output from above
2375 * disable interrupts
2376 * turn off timers
2377 * turn off the radio
2378 * clear transmit machinery
2379 * clear receive machinery
2380 * drain and release tx queues
2381 * reclaim beacon resources
2382 * power down hardware
2383 *
2384 * Note that some of this work is not possible if the
2385 * hardware is gone (invalid).
2386 */
2387 ieee80211_stop_queues(sc->hw);
2892 2388
2893 return ath5k_tx_queue(hw, skb, sc->txq); 2389 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2390 ath5k_led_off(sc);
2391 ath5k_hw_set_imr(ah, 0);
2392 synchronize_irq(sc->pdev->irq);
2393 }
2394 ath5k_txq_cleanup(sc);
2395 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2396 ath5k_rx_stop(sc);
2397 ath5k_hw_phy_disable(ah);
2398 }
2399
2400 return 0;
2894} 2401}
2895 2402
2896static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 2403static int
2897 struct ath5k_txq *txq) 2404ath5k_init(struct ath5k_softc *sc)
2898{ 2405{
2899 struct ath5k_softc *sc = hw->priv; 2406 struct ath5k_hw *ah = sc->ah;
2900 struct ath5k_buf *bf; 2407 struct ath_common *common = ath5k_hw_common(ah);
2901 unsigned long flags; 2408 int ret, i;
2902 int padsize;
2903 2409
2904 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2410 mutex_lock(&sc->lock);
2411
2412 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2905 2413
2906 /* 2414 /*
2907 * The hardware expects the header padded to 4 byte boundaries. 2415 * Stop anything previously setup. This is safe
2908 * If this is not the case, we add the padding after the header. 2416 * no matter this is the first time through or not.
2909 */ 2417 */
2910 padsize = ath5k_add_padding(skb); 2418 ath5k_stop_locked(sc);
2911 if (padsize < 0) {
2912 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
2913 " headroom to pad");
2914 goto drop_packet;
2915 }
2916 2419
2917 spin_lock_irqsave(&sc->txbuflock, flags); 2420 /*
2918 if (list_empty(&sc->txbuf)) { 2421 * The basic interface to setting the hardware in a good
2919 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2422 * state is ``reset''. On return the hardware is known to
2920 spin_unlock_irqrestore(&sc->txbuflock, flags); 2423 * be powered up and with interrupts disabled. This must
2921 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); 2424 * be followed by initialization of the appropriate bits
2922 goto drop_packet; 2425 * and then setup of the interrupt mask.
2923 } 2426 */
2924 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2427 sc->curchan = sc->hw->conf.channel;
2925 list_del(&bf->list); 2428 sc->curband = &sc->sbands[sc->curchan->band];
2926 sc->txbuf_len--; 2429 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2927 if (list_empty(&sc->txbuf)) 2430 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2928 ieee80211_stop_queues(hw); 2431 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2929 spin_unlock_irqrestore(&sc->txbuflock, flags);
2930 2432
2931 bf->skb = skb; 2433 ret = ath5k_reset(sc, NULL);
2434 if (ret)
2435 goto done;
2932 2436
2933 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) { 2437 ath5k_rfkill_hw_start(ah);
2934 bf->skb = NULL; 2438
2935 spin_lock_irqsave(&sc->txbuflock, flags); 2439 /*
2936 list_add_tail(&bf->list, &sc->txbuf); 2440 * Reset the key cache since some parts do not reset the
2937 sc->txbuf_len++; 2441 * contents on initial power up or resume from suspend.
2938 spin_unlock_irqrestore(&sc->txbuflock, flags); 2442 */
2939 goto drop_packet; 2443 for (i = 0; i < common->keymax; i++)
2444 ath_hw_keyreset(common, (u16) i);
2445
2446 ath5k_hw_set_ack_bitrate_high(ah, true);
2447
2448 for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
2449 sc->bslot[i] = NULL;
2450
2451 ret = 0;
2452done:
2453 mmiowb();
2454 mutex_unlock(&sc->lock);
2455
2456 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2457 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2458
2459 return ret;
2460}
2461
2462static void stop_tasklets(struct ath5k_softc *sc)
2463{
2464 tasklet_kill(&sc->rxtq);
2465 tasklet_kill(&sc->txtq);
2466 tasklet_kill(&sc->calib);
2467 tasklet_kill(&sc->beacontq);
2468 tasklet_kill(&sc->ani_tasklet);
2469}
2470
2471/*
2472 * Stop the device, grabbing the top-level lock to protect
2473 * against concurrent entry through ath5k_init (which can happen
2474 * if another thread does a system call and the thread doing the
2475 * stop is preempted).
2476 */
2477static int
2478ath5k_stop_hw(struct ath5k_softc *sc)
2479{
2480 int ret;
2481
2482 mutex_lock(&sc->lock);
2483 ret = ath5k_stop_locked(sc);
2484 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2485 /*
2486 * Don't set the card in full sleep mode!
2487 *
2488 * a) When the device is in this state it must be carefully
2489 * woken up or references to registers in the PCI clock
2490 * domain may freeze the bus (and system). This varies
2491 * by chip and is mostly an issue with newer parts
2492 * (madwifi sources mentioned srev >= 0x78) that go to
2493 * sleep more quickly.
2494 *
2495 * b) On older chips full sleep results a weird behaviour
2496 * during wakeup. I tested various cards with srev < 0x78
2497 * and they don't wake up after module reload, a second
2498 * module reload is needed to bring the card up again.
2499 *
2500 * Until we figure out what's going on don't enable
2501 * full chip reset on any chip (this is what Legacy HAL
2502 * and Sam's HAL do anyway). Instead Perform a full reset
2503 * on the device (same as initial state after attach) and
2504 * leave it idle (keep MAC/BB on warm reset) */
2505 ret = ath5k_hw_on_hold(sc->ah);
2506
2507 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2508 "putting device to sleep\n");
2940 } 2509 }
2941 return NETDEV_TX_OK;
2942 2510
2943drop_packet: 2511 mmiowb();
2944 dev_kfree_skb_any(skb); 2512 mutex_unlock(&sc->lock);
2945 return NETDEV_TX_OK; 2513
2514 stop_tasklets(sc);
2515
2516 cancel_delayed_work_sync(&sc->tx_complete_work);
2517
2518 ath5k_rfkill_hw_stop(sc->ah);
2519
2520 return ret;
2946} 2521}
2947 2522
2948/* 2523/*
@@ -3019,6 +2594,208 @@ static void ath5k_reset_work(struct work_struct *work)
3019 mutex_unlock(&sc->lock); 2594 mutex_unlock(&sc->lock);
3020} 2595}
3021 2596
2597static int
2598ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2599{
2600 struct ath5k_softc *sc = hw->priv;
2601 struct ath5k_hw *ah = sc->ah;
2602 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
2603 struct ath5k_txq *txq;
2604 u8 mac[ETH_ALEN] = {};
2605 int ret;
2606
2607 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
2608
2609 /*
2610 * Check if the MAC has multi-rate retry support.
2611 * We do this by trying to setup a fake extended
2612 * descriptor. MACs that don't have support will
2613 * return false w/o doing anything. MACs that do
2614 * support it will return true w/o doing anything.
2615 */
2616 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
2617
2618 if (ret < 0)
2619 goto err;
2620 if (ret > 0)
2621 __set_bit(ATH_STAT_MRRETRY, sc->status);
2622
2623 /*
2624 * Collect the channel list. The 802.11 layer
2625 * is resposible for filtering this list based
2626 * on settings like the phy mode and regulatory
2627 * domain restrictions.
2628 */
2629 ret = ath5k_setup_bands(hw);
2630 if (ret) {
2631 ATH5K_ERR(sc, "can't get channels\n");
2632 goto err;
2633 }
2634
2635 /* NB: setup here so ath5k_rate_update is happy */
2636 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
2637 ath5k_setcurmode(sc, AR5K_MODE_11A);
2638 else
2639 ath5k_setcurmode(sc, AR5K_MODE_11B);
2640
2641 /*
2642 * Allocate tx+rx descriptors and populate the lists.
2643 */
2644 ret = ath5k_desc_alloc(sc, pdev);
2645 if (ret) {
2646 ATH5K_ERR(sc, "can't allocate descriptors\n");
2647 goto err;
2648 }
2649
2650 /*
2651 * Allocate hardware transmit queues: one queue for
2652 * beacon frames and one data queue for each QoS
2653 * priority. Note that hw functions handle resetting
2654 * these queues at the needed time.
2655 */
2656 ret = ath5k_beaconq_setup(ah);
2657 if (ret < 0) {
2658 ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
2659 goto err_desc;
2660 }
2661 sc->bhalq = ret;
2662 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
2663 if (IS_ERR(sc->cabq)) {
2664 ATH5K_ERR(sc, "can't setup cab queue\n");
2665 ret = PTR_ERR(sc->cabq);
2666 goto err_bhal;
2667 }
2668
2669 /* This order matches mac80211's queue priority, so we can
2670 * directly use the mac80211 queue number without any mapping */
2671 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
2672 if (IS_ERR(txq)) {
2673 ATH5K_ERR(sc, "can't setup xmit queue\n");
2674 ret = PTR_ERR(txq);
2675 goto err_queues;
2676 }
2677 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
2678 if (IS_ERR(txq)) {
2679 ATH5K_ERR(sc, "can't setup xmit queue\n");
2680 ret = PTR_ERR(txq);
2681 goto err_queues;
2682 }
2683 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2684 if (IS_ERR(txq)) {
2685 ATH5K_ERR(sc, "can't setup xmit queue\n");
2686 ret = PTR_ERR(txq);
2687 goto err_queues;
2688 }
2689 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
2690 if (IS_ERR(txq)) {
2691 ATH5K_ERR(sc, "can't setup xmit queue\n");
2692 ret = PTR_ERR(txq);
2693 goto err_queues;
2694 }
2695 hw->queues = 4;
2696
2697 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
2698 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
2699 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
2700 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
2701 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
2702
2703 INIT_WORK(&sc->reset_work, ath5k_reset_work);
2704 INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
2705
2706 ret = ath5k_eeprom_read_mac(ah, mac);
2707 if (ret) {
2708 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
2709 sc->pdev->device);
2710 goto err_queues;
2711 }
2712
2713 SET_IEEE80211_PERM_ADDR(hw, mac);
2714 memcpy(&sc->lladdr, mac, ETH_ALEN);
2715 /* All MAC address bits matter for ACKs */
2716 ath5k_update_bssid_mask_and_opmode(sc, NULL);
2717
2718 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
2719 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
2720 if (ret) {
2721 ATH5K_ERR(sc, "can't initialize regulatory system\n");
2722 goto err_queues;
2723 }
2724
2725 ret = ieee80211_register_hw(hw);
2726 if (ret) {
2727 ATH5K_ERR(sc, "can't register ieee80211 hw\n");
2728 goto err_queues;
2729 }
2730
2731 if (!ath_is_world_regd(regulatory))
2732 regulatory_hint(hw->wiphy, regulatory->alpha2);
2733
2734 ath5k_init_leds(sc);
2735
2736 ath5k_sysfs_register(sc);
2737
2738 return 0;
2739err_queues:
2740 ath5k_txq_release(sc);
2741err_bhal:
2742 ath5k_hw_release_tx_queue(ah, sc->bhalq);
2743err_desc:
2744 ath5k_desc_free(sc, pdev);
2745err:
2746 return ret;
2747}
2748
2749static void
2750ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2751{
2752 struct ath5k_softc *sc = hw->priv;
2753
2754 /*
2755 * NB: the order of these is important:
2756 * o call the 802.11 layer before detaching ath5k_hw to
2757 * ensure callbacks into the driver to delete global
2758 * key cache entries can be handled
2759 * o reclaim the tx queue data structures after calling
2760 * the 802.11 layer as we'll get called back to reclaim
2761 * node state and potentially want to use them
2762 * o to cleanup the tx queues the hal is called, so detach
2763 * it last
2764 * XXX: ??? detach ath5k_hw ???
2765 * Other than that, it's straightforward...
2766 */
2767 ieee80211_unregister_hw(hw);
2768 ath5k_desc_free(sc, pdev);
2769 ath5k_txq_release(sc);
2770 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
2771 ath5k_unregister_leds(sc);
2772
2773 ath5k_sysfs_unregister(sc);
2774 /*
2775 * NB: can't reclaim these until after ieee80211_ifdetach
2776 * returns because we'll get called back to reclaim node
2777 * state and potentially want to use them.
2778 */
2779}
2780
2781/********************\
2782* Mac80211 functions *
2783\********************/
2784
2785static int
2786ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2787{
2788 struct ath5k_softc *sc = hw->priv;
2789 u16 qnum = skb_get_queue_mapping(skb);
2790
2791 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
2792 dev_kfree_skb_any(skb);
2793 return 0;
2794 }
2795
2796 return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
2797}
2798
3022static int ath5k_start(struct ieee80211_hw *hw) 2799static int ath5k_start(struct ieee80211_hw *hw)
3023{ 2800{
3024 return ath5k_init(hw->priv); 2801 return ath5k_init(hw->priv);
@@ -3034,31 +2811,78 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
3034{ 2811{
3035 struct ath5k_softc *sc = hw->priv; 2812 struct ath5k_softc *sc = hw->priv;
3036 int ret; 2813 int ret;
2814 struct ath5k_vif *avf = (void *)vif->drv_priv;
3037 2815
3038 mutex_lock(&sc->lock); 2816 mutex_lock(&sc->lock);
3039 if (sc->vif) { 2817
3040 ret = 0; 2818 if ((vif->type == NL80211_IFTYPE_AP ||
2819 vif->type == NL80211_IFTYPE_ADHOC)
2820 && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
2821 ret = -ELNRNG;
3041 goto end; 2822 goto end;
3042 } 2823 }
3043 2824
3044 sc->vif = vif; 2825 /* Don't allow other interfaces if one ad-hoc is configured.
2826 * TODO: Fix the problems with ad-hoc and multiple other interfaces.
2827 * We would need to operate the HW in ad-hoc mode to allow TSF updates
2828 * for the IBSS, but this breaks with additional AP or STA interfaces
2829 * at the moment. */
2830 if (sc->num_adhoc_vifs ||
2831 (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
2832 ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
2833 ret = -ELNRNG;
2834 goto end;
2835 }
3045 2836
3046 switch (vif->type) { 2837 switch (vif->type) {
3047 case NL80211_IFTYPE_AP: 2838 case NL80211_IFTYPE_AP:
3048 case NL80211_IFTYPE_STATION: 2839 case NL80211_IFTYPE_STATION:
3049 case NL80211_IFTYPE_ADHOC: 2840 case NL80211_IFTYPE_ADHOC:
3050 case NL80211_IFTYPE_MESH_POINT: 2841 case NL80211_IFTYPE_MESH_POINT:
3051 sc->opmode = vif->type; 2842 avf->opmode = vif->type;
3052 break; 2843 break;
3053 default: 2844 default:
3054 ret = -EOPNOTSUPP; 2845 ret = -EOPNOTSUPP;
3055 goto end; 2846 goto end;
3056 } 2847 }
3057 2848
3058 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode); 2849 sc->nvifs++;
2850 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
2851
2852 /* Assign the vap/adhoc to a beacon xmit slot. */
2853 if ((avf->opmode == NL80211_IFTYPE_AP) ||
2854 (avf->opmode == NL80211_IFTYPE_ADHOC)) {
2855 int slot;
3059 2856
2857 WARN_ON(list_empty(&sc->bcbuf));
2858 avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
2859 list);
2860 list_del(&avf->bbuf->list);
2861
2862 avf->bslot = 0;
2863 for (slot = 0; slot < ATH_BCBUF; slot++) {
2864 if (!sc->bslot[slot]) {
2865 avf->bslot = slot;
2866 break;
2867 }
2868 }
2869 BUG_ON(sc->bslot[avf->bslot] != NULL);
2870 sc->bslot[avf->bslot] = vif;
2871 if (avf->opmode == NL80211_IFTYPE_AP)
2872 sc->num_ap_vifs++;
2873 else
2874 sc->num_adhoc_vifs++;
2875 }
2876
2877 /* Any MAC address is fine, all others are included through the
2878 * filter.
2879 */
2880 memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
3060 ath5k_hw_set_lladdr(sc->ah, vif->addr); 2881 ath5k_hw_set_lladdr(sc->ah, vif->addr);
3061 ath5k_mode_setup(sc); 2882
2883 memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
2884
2885 ath5k_mode_setup(sc, vif);
3062 2886
3063 ret = 0; 2887 ret = 0;
3064end: 2888end:
@@ -3071,15 +2895,29 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
3071 struct ieee80211_vif *vif) 2895 struct ieee80211_vif *vif)
3072{ 2896{
3073 struct ath5k_softc *sc = hw->priv; 2897 struct ath5k_softc *sc = hw->priv;
3074 u8 mac[ETH_ALEN] = {}; 2898 struct ath5k_vif *avf = (void *)vif->drv_priv;
2899 unsigned int i;
3075 2900
3076 mutex_lock(&sc->lock); 2901 mutex_lock(&sc->lock);
3077 if (sc->vif != vif) 2902 sc->nvifs--;
3078 goto end; 2903
2904 if (avf->bbuf) {
2905 ath5k_txbuf_free_skb(sc, avf->bbuf);
2906 list_add_tail(&avf->bbuf->list, &sc->bcbuf);
2907 for (i = 0; i < ATH_BCBUF; i++) {
2908 if (sc->bslot[i] == vif) {
2909 sc->bslot[i] = NULL;
2910 break;
2911 }
2912 }
2913 avf->bbuf = NULL;
2914 }
2915 if (avf->opmode == NL80211_IFTYPE_AP)
2916 sc->num_ap_vifs--;
2917 else if (avf->opmode == NL80211_IFTYPE_ADHOC)
2918 sc->num_adhoc_vifs--;
3079 2919
3080 ath5k_hw_set_lladdr(sc->ah, mac); 2920 ath5k_update_bssid_mask_and_opmode(sc, NULL);
3081 sc->vif = NULL;
3082end:
3083 mutex_unlock(&sc->lock); 2921 mutex_unlock(&sc->lock);
3084} 2922}
3085 2923
@@ -3162,6 +3000,19 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
3162 return ((u64)(mfilt[1]) << 32) | mfilt[0]; 3000 return ((u64)(mfilt[1]) << 32) | mfilt[0];
3163} 3001}
3164 3002
3003static bool ath_any_vif_assoc(struct ath5k_softc *sc)
3004{
3005 struct ath_vif_iter_data iter_data;
3006 iter_data.hw_macaddr = NULL;
3007 iter_data.any_assoc = false;
3008 iter_data.need_set_hw_addr = false;
3009 iter_data.found_active = true;
3010
3011 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
3012 &iter_data);
3013 return iter_data.any_assoc;
3014}
3015
3165#define SUPPORTED_FIF_FLAGS \ 3016#define SUPPORTED_FIF_FLAGS \
3166 FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ 3017 FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
3167 FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ 3018 FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
@@ -3232,7 +3083,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
3232 3083
3233 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons 3084 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
3234 * and probes for any BSSID */ 3085 * and probes for any BSSID */
3235 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) 3086 if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
3236 rfilt |= AR5K_RX_FILTER_BEACON; 3087 rfilt |= AR5K_RX_FILTER_BEACON;
3237 3088
3238 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not 3089 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
@@ -3291,18 +3142,14 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3291 if (modparam_nohwcrypt) 3142 if (modparam_nohwcrypt)
3292 return -EOPNOTSUPP; 3143 return -EOPNOTSUPP;
3293 3144
3294 if (sc->opmode == NL80211_IFTYPE_AP)
3295 return -EOPNOTSUPP;
3296
3297 switch (key->cipher) { 3145 switch (key->cipher) {
3298 case WLAN_CIPHER_SUITE_WEP40: 3146 case WLAN_CIPHER_SUITE_WEP40:
3299 case WLAN_CIPHER_SUITE_WEP104: 3147 case WLAN_CIPHER_SUITE_WEP104:
3300 case WLAN_CIPHER_SUITE_TKIP: 3148 case WLAN_CIPHER_SUITE_TKIP:
3301 break; 3149 break;
3302 case WLAN_CIPHER_SUITE_CCMP: 3150 case WLAN_CIPHER_SUITE_CCMP:
3303 if (sc->ah->ah_aes_support) 3151 if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
3304 break; 3152 break;
3305
3306 return -EOPNOTSUPP; 3153 return -EOPNOTSUPP;
3307 default: 3154 default:
3308 WARN_ON(1); 3155 WARN_ON(1);
@@ -3313,27 +3160,25 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3313 3160
3314 switch (cmd) { 3161 switch (cmd) {
3315 case SET_KEY: 3162 case SET_KEY:
3316 ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, 3163 ret = ath_key_config(common, vif, sta, key);
3317 sta ? sta->addr : NULL); 3164 if (ret >= 0) {
3318 if (ret) { 3165 key->hw_key_idx = ret;
3319 ATH5K_ERR(sc, "can't set the key\n"); 3166 /* push IV and Michael MIC generation to stack */
3320 goto unlock; 3167 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3168 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3169 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3170 if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
3171 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
3172 ret = 0;
3321 } 3173 }
3322 __set_bit(key->keyidx, common->keymap);
3323 key->hw_key_idx = key->keyidx;
3324 key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV |
3325 IEEE80211_KEY_FLAG_GENERATE_MMIC);
3326 break; 3174 break;
3327 case DISABLE_KEY: 3175 case DISABLE_KEY:
3328 ath5k_hw_reset_key(sc->ah, key->keyidx); 3176 ath_key_delete(common, key);
3329 __clear_bit(key->keyidx, common->keymap);
3330 break; 3177 break;
3331 default: 3178 default:
3332 ret = -EINVAL; 3179 ret = -EINVAL;
3333 goto unlock;
3334 } 3180 }
3335 3181
3336unlock:
3337 mmiowb(); 3182 mmiowb();
3338 mutex_unlock(&sc->lock); 3183 mutex_unlock(&sc->lock);
3339 return ret; 3184 return ret;
@@ -3403,43 +3248,6 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
3403 ath5k_hw_reset_tsf(sc->ah); 3248 ath5k_hw_reset_tsf(sc->ah);
3404} 3249}
3405 3250
3406/*
3407 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
3408 * this is called only once at config_bss time, for AP we do it every
3409 * SWBA interrupt so that the TIM will reflect buffered frames.
3410 *
3411 * Called with the beacon lock.
3412 */
3413static int
3414ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
3415{
3416 int ret;
3417 struct ath5k_softc *sc = hw->priv;
3418 struct sk_buff *skb;
3419
3420 if (WARN_ON(!vif)) {
3421 ret = -EINVAL;
3422 goto out;
3423 }
3424
3425 skb = ieee80211_beacon_get(hw, vif);
3426
3427 if (!skb) {
3428 ret = -ENOMEM;
3429 goto out;
3430 }
3431
3432 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3433
3434 ath5k_txbuf_free_skb(sc, sc->bbuf);
3435 sc->bbuf->skb = skb;
3436 ret = ath5k_beacon_setup(sc, sc->bbuf);
3437 if (ret)
3438 sc->bbuf->skb = NULL;
3439out:
3440 return ret;
3441}
3442
3443static void 3251static void
3444set_beacon_filter(struct ieee80211_hw *hw, bool enable) 3252set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3445{ 3253{
@@ -3460,14 +3268,13 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3460 struct ieee80211_bss_conf *bss_conf, 3268 struct ieee80211_bss_conf *bss_conf,
3461 u32 changes) 3269 u32 changes)
3462{ 3270{
3271 struct ath5k_vif *avf = (void *)vif->drv_priv;
3463 struct ath5k_softc *sc = hw->priv; 3272 struct ath5k_softc *sc = hw->priv;
3464 struct ath5k_hw *ah = sc->ah; 3273 struct ath5k_hw *ah = sc->ah;
3465 struct ath_common *common = ath5k_hw_common(ah); 3274 struct ath_common *common = ath5k_hw_common(ah);
3466 unsigned long flags; 3275 unsigned long flags;
3467 3276
3468 mutex_lock(&sc->lock); 3277 mutex_lock(&sc->lock);
3469 if (WARN_ON(sc->vif != vif))
3470 goto unlock;
3471 3278
3472 if (changes & BSS_CHANGED_BSSID) { 3279 if (changes & BSS_CHANGED_BSSID) {
3473 /* Cache for later use during resets */ 3280 /* Cache for later use during resets */
@@ -3481,7 +3288,12 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3481 sc->bintval = bss_conf->beacon_int; 3288 sc->bintval = bss_conf->beacon_int;
3482 3289
3483 if (changes & BSS_CHANGED_ASSOC) { 3290 if (changes & BSS_CHANGED_ASSOC) {
3484 sc->assoc = bss_conf->assoc; 3291 avf->assoc = bss_conf->assoc;
3292 if (bss_conf->assoc)
3293 sc->assoc = bss_conf->assoc;
3294 else
3295 sc->assoc = ath_any_vif_assoc(sc);
3296
3485 if (sc->opmode == NL80211_IFTYPE_STATION) 3297 if (sc->opmode == NL80211_IFTYPE_STATION)
3486 set_beacon_filter(hw, sc->assoc); 3298 set_beacon_filter(hw, sc->assoc);
3487 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3299 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
@@ -3509,7 +3321,6 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3509 BSS_CHANGED_BEACON_INT)) 3321 BSS_CHANGED_BEACON_INT))
3510 ath5k_beacon_config(sc); 3322 ath5k_beacon_config(sc);
3511 3323
3512 unlock:
3513 mutex_unlock(&sc->lock); 3324 mutex_unlock(&sc->lock);
3514} 3325}
3515 3326
@@ -3545,3 +3356,399 @@ static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3545 ath5k_hw_set_coverage_class(sc->ah, coverage_class); 3356 ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3546 mutex_unlock(&sc->lock); 3357 mutex_unlock(&sc->lock);
3547} 3358}
3359
3360static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3361 const struct ieee80211_tx_queue_params *params)
3362{
3363 struct ath5k_softc *sc = hw->priv;
3364 struct ath5k_hw *ah = sc->ah;
3365 struct ath5k_txq_info qi;
3366 int ret = 0;
3367
3368 if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
3369 return 0;
3370
3371 mutex_lock(&sc->lock);
3372
3373 ath5k_hw_get_tx_queueprops(ah, queue, &qi);
3374
3375 qi.tqi_aifs = params->aifs;
3376 qi.tqi_cw_min = params->cw_min;
3377 qi.tqi_cw_max = params->cw_max;
3378 qi.tqi_burst_time = params->txop;
3379
3380 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3381 "Configure tx [queue %d], "
3382 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
3383 queue, params->aifs, params->cw_min,
3384 params->cw_max, params->txop);
3385
3386 if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
3387 ATH5K_ERR(sc,
3388 "Unable to update hardware queue %u!\n", queue);
3389 ret = -EIO;
3390 } else
3391 ath5k_hw_reset_tx_queue(ah, queue);
3392
3393 mutex_unlock(&sc->lock);
3394
3395 return ret;
3396}
3397
3398static const struct ieee80211_ops ath5k_hw_ops = {
3399 .tx = ath5k_tx,
3400 .start = ath5k_start,
3401 .stop = ath5k_stop,
3402 .add_interface = ath5k_add_interface,
3403 .remove_interface = ath5k_remove_interface,
3404 .config = ath5k_config,
3405 .prepare_multicast = ath5k_prepare_multicast,
3406 .configure_filter = ath5k_configure_filter,
3407 .set_key = ath5k_set_key,
3408 .get_stats = ath5k_get_stats,
3409 .get_survey = ath5k_get_survey,
3410 .conf_tx = ath5k_conf_tx,
3411 .get_tsf = ath5k_get_tsf,
3412 .set_tsf = ath5k_set_tsf,
3413 .reset_tsf = ath5k_reset_tsf,
3414 .bss_info_changed = ath5k_bss_info_changed,
3415 .sw_scan_start = ath5k_sw_scan_start,
3416 .sw_scan_complete = ath5k_sw_scan_complete,
3417 .set_coverage_class = ath5k_set_coverage_class,
3418};
3419
3420/********************\
3421* PCI Initialization *
3422\********************/
3423
3424static int __devinit
3425ath5k_pci_probe(struct pci_dev *pdev,
3426 const struct pci_device_id *id)
3427{
3428 void __iomem *mem;
3429 struct ath5k_softc *sc;
3430 struct ath_common *common;
3431 struct ieee80211_hw *hw;
3432 int ret;
3433 u8 csz;
3434
3435 /*
3436 * L0s needs to be disabled on all ath5k cards.
3437 *
3438 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
3439 * by default in the future in 2.6.36) this will also mean both L1 and
3440 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
3441 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
3442 * though but cannot currently undue the effect of a blacklist, for
3443 * details you can read pcie_aspm_sanity_check() and see how it adjusts
3444 * the device link capability.
3445 *
3446 * It may be possible in the future to implement some PCI API to allow
3447 * drivers to override blacklists for pre 1.1 PCIe but for now it is
3448 * best to accept that both L0s and L1 will be disabled completely for
3449 * distributions shipping with CONFIG_PCIEASPM rather than having this
3450 * issue present. Motivation for adding this new API will be to help
3451 * with power consumption for some of these devices.
3452 */
3453 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
3454
3455 ret = pci_enable_device(pdev);
3456 if (ret) {
3457 dev_err(&pdev->dev, "can't enable device\n");
3458 goto err;
3459 }
3460
3461 /* XXX 32-bit addressing only */
3462 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3463 if (ret) {
3464 dev_err(&pdev->dev, "32-bit DMA not available\n");
3465 goto err_dis;
3466 }
3467
3468 /*
3469 * Cache line size is used to size and align various
3470 * structures used to communicate with the hardware.
3471 */
3472 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
3473 if (csz == 0) {
3474 /*
3475 * Linux 2.4.18 (at least) writes the cache line size
3476 * register as a 16-bit wide register which is wrong.
3477 * We must have this setup properly for rx buffer
3478 * DMA to work so force a reasonable value here if it
3479 * comes up zero.
3480 */
3481 csz = L1_CACHE_BYTES >> 2;
3482 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
3483 }
3484 /*
3485 * The default setting of latency timer yields poor results,
3486 * set it to the value used by other systems. It may be worth
3487 * tweaking this setting more.
3488 */
3489 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
3490
3491 /* Enable bus mastering */
3492 pci_set_master(pdev);
3493
3494 /*
3495 * Disable the RETRY_TIMEOUT register (0x41) to keep
3496 * PCI Tx retries from interfering with C3 CPU state.
3497 */
3498 pci_write_config_byte(pdev, 0x41, 0);
3499
3500 ret = pci_request_region(pdev, 0, "ath5k");
3501 if (ret) {
3502 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
3503 goto err_dis;
3504 }
3505
3506 mem = pci_iomap(pdev, 0, 0);
3507 if (!mem) {
3508 dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
3509 ret = -EIO;
3510 goto err_reg;
3511 }
3512
3513 /*
3514 * Allocate hw (mac80211 main struct)
3515 * and hw->priv (driver private data)
3516 */
3517 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
3518 if (hw == NULL) {
3519 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
3520 ret = -ENOMEM;
3521 goto err_map;
3522 }
3523
3524 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
3525
3526 /* Initialize driver private data */
3527 SET_IEEE80211_DEV(hw, &pdev->dev);
3528 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3529 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
3530 IEEE80211_HW_SIGNAL_DBM;
3531
3532 hw->wiphy->interface_modes =
3533 BIT(NL80211_IFTYPE_AP) |
3534 BIT(NL80211_IFTYPE_STATION) |
3535 BIT(NL80211_IFTYPE_ADHOC) |
3536 BIT(NL80211_IFTYPE_MESH_POINT);
3537
3538 hw->extra_tx_headroom = 2;
3539 hw->channel_change_time = 5000;
3540 sc = hw->priv;
3541 sc->hw = hw;
3542 sc->pdev = pdev;
3543
3544 /*
3545 * Mark the device as detached to avoid processing
3546 * interrupts until setup is complete.
3547 */
3548 __set_bit(ATH_STAT_INVALID, sc->status);
3549
3550 sc->iobase = mem; /* So we can unmap it on detach */
3551 sc->opmode = NL80211_IFTYPE_STATION;
3552 sc->bintval = 1000;
3553 mutex_init(&sc->lock);
3554 spin_lock_init(&sc->rxbuflock);
3555 spin_lock_init(&sc->txbuflock);
3556 spin_lock_init(&sc->block);
3557
3558 /* Set private data */
3559 pci_set_drvdata(pdev, sc);
3560
3561 /* Setup interrupt handler */
3562 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
3563 if (ret) {
3564 ATH5K_ERR(sc, "request_irq failed\n");
3565 goto err_free;
3566 }
3567
3568 /* If we passed the test, malloc an ath5k_hw struct */
3569 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
3570 if (!sc->ah) {
3571 ret = -ENOMEM;
3572 ATH5K_ERR(sc, "out of memory\n");
3573 goto err_irq;
3574 }
3575
3576 sc->ah->ah_sc = sc;
3577 sc->ah->ah_iobase = sc->iobase;
3578 common = ath5k_hw_common(sc->ah);
3579 common->ops = &ath5k_common_ops;
3580 common->ah = sc->ah;
3581 common->hw = hw;
3582 common->cachelsz = csz << 2; /* convert to bytes */
3583
3584 /* Initialize device */
3585 ret = ath5k_hw_attach(sc);
3586 if (ret) {
3587 goto err_free_ah;
3588 }
3589
3590 /* set up multi-rate retry capabilities */
3591 if (sc->ah->ah_version == AR5K_AR5212) {
3592 hw->max_rates = 4;
3593 hw->max_rate_tries = 11;
3594 }
3595
3596 hw->vif_data_size = sizeof(struct ath5k_vif);
3597
3598 /* Finish private driver data initialization */
3599 ret = ath5k_attach(pdev, hw);
3600 if (ret)
3601 goto err_ah;
3602
3603 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
3604 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
3605 sc->ah->ah_mac_srev,
3606 sc->ah->ah_phy_revision);
3607
3608 if (!sc->ah->ah_single_chip) {
3609 /* Single chip radio (!RF5111) */
3610 if (sc->ah->ah_radio_5ghz_revision &&
3611 !sc->ah->ah_radio_2ghz_revision) {
3612 /* No 5GHz support -> report 2GHz radio */
3613 if (!test_bit(AR5K_MODE_11A,
3614 sc->ah->ah_capabilities.cap_mode)) {
3615 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3616 ath5k_chip_name(AR5K_VERSION_RAD,
3617 sc->ah->ah_radio_5ghz_revision),
3618 sc->ah->ah_radio_5ghz_revision);
3619 /* No 2GHz support (5110 and some
3620 * 5Ghz only cards) -> report 5Ghz radio */
3621 } else if (!test_bit(AR5K_MODE_11B,
3622 sc->ah->ah_capabilities.cap_mode)) {
3623 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3624 ath5k_chip_name(AR5K_VERSION_RAD,
3625 sc->ah->ah_radio_5ghz_revision),
3626 sc->ah->ah_radio_5ghz_revision);
3627 /* Multiband radio */
3628 } else {
3629 ATH5K_INFO(sc, "RF%s multiband radio found"
3630 " (0x%x)\n",
3631 ath5k_chip_name(AR5K_VERSION_RAD,
3632 sc->ah->ah_radio_5ghz_revision),
3633 sc->ah->ah_radio_5ghz_revision);
3634 }
3635 }
3636 /* Multi chip radio (RF5111 - RF2111) ->
3637 * report both 2GHz/5GHz radios */
3638 else if (sc->ah->ah_radio_5ghz_revision &&
3639 sc->ah->ah_radio_2ghz_revision){
3640 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3641 ath5k_chip_name(AR5K_VERSION_RAD,
3642 sc->ah->ah_radio_5ghz_revision),
3643 sc->ah->ah_radio_5ghz_revision);
3644 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3645 ath5k_chip_name(AR5K_VERSION_RAD,
3646 sc->ah->ah_radio_2ghz_revision),
3647 sc->ah->ah_radio_2ghz_revision);
3648 }
3649 }
3650
3651 ath5k_debug_init_device(sc);
3652
3653 /* ready to process interrupts */
3654 __clear_bit(ATH_STAT_INVALID, sc->status);
3655
3656 return 0;
3657err_ah:
3658 ath5k_hw_detach(sc->ah);
3659err_free_ah:
3660 kfree(sc->ah);
3661err_irq:
3662 free_irq(pdev->irq, sc);
3663err_free:
3664 ieee80211_free_hw(hw);
3665err_map:
3666 pci_iounmap(pdev, mem);
3667err_reg:
3668 pci_release_region(pdev, 0);
3669err_dis:
3670 pci_disable_device(pdev);
3671err:
3672 return ret;
3673}
3674
3675static void __devexit
3676ath5k_pci_remove(struct pci_dev *pdev)
3677{
3678 struct ath5k_softc *sc = pci_get_drvdata(pdev);
3679
3680 ath5k_debug_finish_device(sc);
3681 ath5k_detach(pdev, sc->hw);
3682 ath5k_hw_detach(sc->ah);
3683 kfree(sc->ah);
3684 free_irq(pdev->irq, sc);
3685 pci_iounmap(pdev, sc->iobase);
3686 pci_release_region(pdev, 0);
3687 pci_disable_device(pdev);
3688 ieee80211_free_hw(sc->hw);
3689}
3690
3691#ifdef CONFIG_PM_SLEEP
3692static int ath5k_pci_suspend(struct device *dev)
3693{
3694 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
3695
3696 ath5k_led_off(sc);
3697 return 0;
3698}
3699
3700static int ath5k_pci_resume(struct device *dev)
3701{
3702 struct pci_dev *pdev = to_pci_dev(dev);
3703 struct ath5k_softc *sc = pci_get_drvdata(pdev);
3704
3705 /*
3706 * Suspend/Resume resets the PCI configuration space, so we have to
3707 * re-disable the RETRY_TIMEOUT register (0x41) to keep
3708 * PCI Tx retries from interfering with C3 CPU state
3709 */
3710 pci_write_config_byte(pdev, 0x41, 0);
3711
3712 ath5k_led_enable(sc);
3713 return 0;
3714}
3715
3716static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
3717#define ATH5K_PM_OPS (&ath5k_pm_ops)
3718#else
3719#define ATH5K_PM_OPS NULL
3720#endif /* CONFIG_PM_SLEEP */
3721
3722static struct pci_driver ath5k_pci_driver = {
3723 .name = KBUILD_MODNAME,
3724 .id_table = ath5k_pci_id_table,
3725 .probe = ath5k_pci_probe,
3726 .remove = __devexit_p(ath5k_pci_remove),
3727 .driver.pm = ATH5K_PM_OPS,
3728};
3729
3730/*
3731 * Module init/exit functions
3732 */
3733static int __init
3734init_ath5k_pci(void)
3735{
3736 int ret;
3737
3738 ret = pci_register_driver(&ath5k_pci_driver);
3739 if (ret) {
3740 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
3741 return ret;
3742 }
3743
3744 return 0;
3745}
3746
3747static void __exit
3748exit_ath5k_pci(void)
3749{
3750 pci_unregister_driver(&ath5k_pci_driver);
3751}
3752
3753module_init(init_ath5k_pci);
3754module_exit(exit_ath5k_pci);
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index dc1241f9c4e8..9a79773cdc2a 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -58,7 +58,9 @@
58 58
59#define ATH_RXBUF 40 /* number of RX buffers */ 59#define ATH_RXBUF 40 /* number of RX buffers */
60#define ATH_TXBUF 200 /* number of TX buffers */ 60#define ATH_TXBUF 200 /* number of TX buffers */
61#define ATH_BCBUF 1 /* number of beacon buffers */ 61#define ATH_BCBUF 4 /* number of beacon buffers */
62#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
63#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
62 64
63struct ath5k_buf { 65struct ath5k_buf {
64 struct list_head list; 66 struct list_head list;
@@ -83,6 +85,9 @@ struct ath5k_txq {
83 struct list_head q; /* transmit queue */ 85 struct list_head q; /* transmit queue */
84 spinlock_t lock; /* lock on q and link */ 86 spinlock_t lock; /* lock on q and link */
85 bool setup; 87 bool setup;
88 int txq_len; /* number of queued buffers */
89 bool txq_poll_mark;
90 unsigned int txq_stuck; /* informational counter */
86}; 91};
87 92
88#define ATH5K_LED_MAX_NAME_LEN 31 93#define ATH5K_LED_MAX_NAME_LEN 31
@@ -116,6 +121,13 @@ struct ath5k_statistics {
116 /* frame errors */ 121 /* frame errors */
117 unsigned int rx_all_count; /* all RX frames, including errors */ 122 unsigned int rx_all_count; /* all RX frames, including errors */
118 unsigned int tx_all_count; /* all TX frames, including errors */ 123 unsigned int tx_all_count; /* all TX frames, including errors */
124 unsigned int rx_bytes_count; /* all RX bytes, including errored pks
125 * and the MAC headers for each packet
126 */
127 unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
128 * and the MAC headers and padding for
129 * each packet.
130 */
119 unsigned int rxerr_crc; 131 unsigned int rxerr_crc;
120 unsigned int rxerr_phy; 132 unsigned int rxerr_phy;
121 unsigned int rxerr_phy_code[32]; 133 unsigned int rxerr_phy_code[32];
@@ -146,6 +158,14 @@ struct ath5k_statistics {
146#define ATH_CHAN_MAX (14+14+14+252+20) 158#define ATH_CHAN_MAX (14+14+14+252+20)
147#endif 159#endif
148 160
161struct ath5k_vif {
162 bool assoc; /* are we associated or not */
163 enum nl80211_iftype opmode;
164 int bslot;
165 struct ath5k_buf *bbuf; /* beacon buffer */
166 u8 lladdr[ETH_ALEN];
167};
168
149/* Software Carrier, keeps track of the driver state 169/* Software Carrier, keeps track of the driver state
150 * associated with an instance of a device */ 170 * associated with an instance of a device */
151struct ath5k_softc { 171struct ath5k_softc {
@@ -182,10 +202,11 @@ struct ath5k_softc {
182 unsigned int curmode; /* current phy mode */ 202 unsigned int curmode; /* current phy mode */
183 struct ieee80211_channel *curchan; /* current h/w channel */ 203 struct ieee80211_channel *curchan; /* current h/w channel */
184 204
185 struct ieee80211_vif *vif; 205 u16 nvifs;
186 206
187 enum ath5k_int imask; /* interrupt mask copy */ 207 enum ath5k_int imask; /* interrupt mask copy */
188 208
209 u8 lladdr[ETH_ALEN];
189 u8 bssidmask[ETH_ALEN]; 210 u8 bssidmask[ETH_ALEN];
190 211
191 unsigned int led_pin, /* GPIO pin for driving LED */ 212 unsigned int led_pin, /* GPIO pin for driving LED */
@@ -204,7 +225,6 @@ struct ath5k_softc {
204 spinlock_t txbuflock; 225 spinlock_t txbuflock;
205 unsigned int txbuf_len; /* buf count in txbuf list */ 226 unsigned int txbuf_len; /* buf count in txbuf list */
206 struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */ 227 struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
207 struct ath5k_txq *txq; /* main tx queue */
208 struct tasklet_struct txtq; /* tx intr tasklet */ 228 struct tasklet_struct txtq; /* tx intr tasklet */
209 struct ath5k_led tx_led; /* tx led */ 229 struct ath5k_led tx_led; /* tx led */
210 230
@@ -214,7 +234,10 @@ struct ath5k_softc {
214 234
215 spinlock_t block; /* protects beacon */ 235 spinlock_t block; /* protects beacon */
216 struct tasklet_struct beacontq; /* beacon intr tasklet */ 236 struct tasklet_struct beacontq; /* beacon intr tasklet */
217 struct ath5k_buf *bbuf; /* beacon buffer */ 237 struct list_head bcbuf; /* beacon buffer */
238 struct ieee80211_vif *bslot[ATH_BCBUF];
239 u16 num_ap_vifs;
240 u16 num_adhoc_vifs;
218 unsigned int bhalq, /* SW q for outgoing beacons */ 241 unsigned int bhalq, /* SW q for outgoing beacons */
219 bmisscount, /* missed beacon transmits */ 242 bmisscount, /* missed beacon transmits */
220 bintval, /* beacon interval in TU */ 243 bintval, /* beacon interval in TU */
@@ -230,6 +253,8 @@ struct ath5k_softc {
230 253
231 struct ath5k_ani_state ani_state; 254 struct ath5k_ani_state ani_state;
232 struct tasklet_struct ani_tasklet; /* ANI calibration */ 255 struct tasklet_struct ani_tasklet; /* ANI calibration */
256
257 struct delayed_work tx_complete_work;
233}; 258};
234 259
235#define ath5k_hw_hasbssidmask(_ah) \ 260#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 1b7c6d7fde93..42ea5b1bdb12 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -60,6 +60,7 @@
60 60
61#include "base.h" 61#include "base.h"
62#include "debug.h" 62#include "debug.h"
63#include "../debug.h"
63 64
64static unsigned int ath5k_debug; 65static unsigned int ath5k_debug;
65module_param_named(debug, ath5k_debug, uint, 0); 66module_param_named(debug, ath5k_debug, uint, 0);
@@ -71,8 +72,6 @@ module_param_named(debug, ath5k_debug, uint, 0);
71#include "reg.h" 72#include "reg.h"
72#include "ani.h" 73#include "ani.h"
73 74
74static struct dentry *ath5k_global_debugfs;
75
76static int ath5k_debugfs_open(struct inode *inode, struct file *file) 75static int ath5k_debugfs_open(struct inode *inode, struct file *file)
77{ 76{
78 file->private_data = inode->i_private; 77 file->private_data = inode->i_private;
@@ -483,6 +482,60 @@ static const struct file_operations fops_antenna = {
483 .owner = THIS_MODULE, 482 .owner = THIS_MODULE,
484}; 483};
485 484
485/* debugfs: misc */
486
487static ssize_t read_file_misc(struct file *file, char __user *user_buf,
488 size_t count, loff_t *ppos)
489{
490 struct ath5k_softc *sc = file->private_data;
491 char buf[700];
492 unsigned int len = 0;
493 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
494
495 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
496 sc->bssidmask);
497 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
498 filt);
499 if (filt & AR5K_RX_FILTER_UCAST)
500 len += snprintf(buf+len, sizeof(buf)-len, " UCAST");
501 if (filt & AR5K_RX_FILTER_MCAST)
502 len += snprintf(buf+len, sizeof(buf)-len, " MCAST");
503 if (filt & AR5K_RX_FILTER_BCAST)
504 len += snprintf(buf+len, sizeof(buf)-len, " BCAST");
505 if (filt & AR5K_RX_FILTER_CONTROL)
506 len += snprintf(buf+len, sizeof(buf)-len, " CONTROL");
507 if (filt & AR5K_RX_FILTER_BEACON)
508 len += snprintf(buf+len, sizeof(buf)-len, " BEACON");
509 if (filt & AR5K_RX_FILTER_PROM)
510 len += snprintf(buf+len, sizeof(buf)-len, " PROM");
511 if (filt & AR5K_RX_FILTER_XRPOLL)
512 len += snprintf(buf+len, sizeof(buf)-len, " XRPOLL");
513 if (filt & AR5K_RX_FILTER_PROBEREQ)
514 len += snprintf(buf+len, sizeof(buf)-len, " PROBEREQ");
515 if (filt & AR5K_RX_FILTER_PHYERR_5212)
516 len += snprintf(buf+len, sizeof(buf)-len, " PHYERR-5212");
517 if (filt & AR5K_RX_FILTER_RADARERR_5212)
518 len += snprintf(buf+len, sizeof(buf)-len, " RADARERR-5212");
519 if (filt & AR5K_RX_FILTER_PHYERR_5211)
520 snprintf(buf+len, sizeof(buf)-len, " PHYERR-5211");
521 if (filt & AR5K_RX_FILTER_RADARERR_5211)
522 len += snprintf(buf+len, sizeof(buf)-len, " RADARERR-5211");
523
524 len += snprintf(buf+len, sizeof(buf)-len, "\nopmode: %s (%d)\n",
525 ath_opmode_to_string(sc->opmode), sc->opmode);
526
527 if (len > sizeof(buf))
528 len = sizeof(buf);
529
530 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
531}
532
533static const struct file_operations fops_misc = {
534 .read = read_file_misc,
535 .open = ath5k_debugfs_open,
536 .owner = THIS_MODULE,
537};
538
486 539
487/* debugfs: frameerrors */ 540/* debugfs: frameerrors */
488 541
@@ -534,6 +587,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
534 st->rxerr_jumbo*100/st->rx_all_count : 0); 587 st->rxerr_jumbo*100/st->rx_all_count : 0);
535 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n", 588 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
536 st->rx_all_count); 589 st->rx_all_count);
590 len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%d\n",
591 st->rx_bytes_count);
537 592
538 len += snprintf(buf+len, sizeof(buf)-len, 593 len += snprintf(buf+len, sizeof(buf)-len,
539 "\nTX\n---------------------\n"); 594 "\nTX\n---------------------\n");
@@ -551,6 +606,8 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
551 st->txerr_filt*100/st->tx_all_count : 0); 606 st->txerr_filt*100/st->tx_all_count : 0);
552 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n", 607 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
553 st->tx_all_count); 608 st->tx_all_count);
609 len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%d\n",
610 st->tx_bytes_count);
554 611
555 if (len > sizeof(buf)) 612 if (len > sizeof(buf))
556 len = sizeof(buf); 613 len = sizeof(buf);
@@ -658,20 +715,21 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
658 len += snprintf(buf+len, sizeof(buf)-len, 715 len += snprintf(buf+len, sizeof(buf)-len,
659 "beacon RSSI average:\t%d\n", 716 "beacon RSSI average:\t%d\n",
660 sc->ah->ah_beacon_rssi_avg.avg); 717 sc->ah->ah_beacon_rssi_avg.avg);
718
719#define CC_PRINT(_struct, _field) \
720 _struct._field, \
721 _struct.cycles > 0 ? \
722 _struct._field*100/_struct.cycles : 0
723
661 len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n", 724 len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
662 as->pfc_tx, 725 CC_PRINT(as->last_cc, tx_frame));
663 as->pfc_cycles > 0 ?
664 as->pfc_tx*100/as->pfc_cycles : 0);
665 len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n", 726 len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
666 as->pfc_rx, 727 CC_PRINT(as->last_cc, rx_frame));
667 as->pfc_cycles > 0 ?
668 as->pfc_rx*100/as->pfc_cycles : 0);
669 len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n", 728 len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
670 as->pfc_busy, 729 CC_PRINT(as->last_cc, rx_busy));
671 as->pfc_cycles > 0 ? 730#undef CC_PRINT
672 as->pfc_busy*100/as->pfc_cycles : 0);
673 len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n", 731 len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
674 as->pfc_cycles); 732 as->last_cc.cycles);
675 len += snprintf(buf+len, sizeof(buf)-len, 733 len += snprintf(buf+len, sizeof(buf)-len,
676 "listen time\t\t%d\tlast: %d\n", 734 "listen time\t\t%d\tlast: %d\n",
677 as->listen_time, as->last_listen); 735 as->listen_time, as->last_listen);
@@ -763,7 +821,7 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
763 821
764 struct ath5k_txq *txq; 822 struct ath5k_txq *txq;
765 struct ath5k_buf *bf, *bf0; 823 struct ath5k_buf *bf, *bf0;
766 int i, n = 0; 824 int i, n;
767 825
768 len += snprintf(buf+len, sizeof(buf)-len, 826 len += snprintf(buf+len, sizeof(buf)-len,
769 "available txbuffers: %d\n", sc->txbuf_len); 827 "available txbuffers: %d\n", sc->txbuf_len);
@@ -777,9 +835,16 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
777 if (!txq->setup) 835 if (!txq->setup)
778 continue; 836 continue;
779 837
838 n = 0;
839 spin_lock_bh(&txq->lock);
780 list_for_each_entry_safe(bf, bf0, &txq->q, list) 840 list_for_each_entry_safe(bf, bf0, &txq->q, list)
781 n++; 841 n++;
782 len += snprintf(buf+len, sizeof(buf)-len, " len: %d\n", n); 842 spin_unlock_bh(&txq->lock);
843
844 len += snprintf(buf+len, sizeof(buf)-len,
845 " len: %d bufs: %d\n", txq->txq_len, n);
846 len += snprintf(buf+len, sizeof(buf)-len,
847 " stuck: %d\n", txq->txq_stuck);
783 } 848 }
784 849
785 if (len > sizeof(buf)) 850 if (len > sizeof(buf))
@@ -815,21 +880,13 @@ static const struct file_operations fops_queue = {
815}; 880};
816 881
817 882
818/* init */
819
820void
821ath5k_debug_init(void)
822{
823 ath5k_global_debugfs = debugfs_create_dir("ath5k", NULL);
824}
825
826void 883void
827ath5k_debug_init_device(struct ath5k_softc *sc) 884ath5k_debug_init_device(struct ath5k_softc *sc)
828{ 885{
829 sc->debug.level = ath5k_debug; 886 sc->debug.level = ath5k_debug;
830 887
831 sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy), 888 sc->debug.debugfs_phydir = debugfs_create_dir("ath5k",
832 ath5k_global_debugfs); 889 sc->hw->wiphy->debugfsdir);
833 890
834 sc->debug.debugfs_debug = debugfs_create_file("debug", 891 sc->debug.debugfs_debug = debugfs_create_file("debug",
835 S_IWUSR | S_IRUSR, 892 S_IWUSR | S_IRUSR,
@@ -849,6 +906,10 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
849 S_IWUSR | S_IRUSR, 906 S_IWUSR | S_IRUSR,
850 sc->debug.debugfs_phydir, sc, &fops_antenna); 907 sc->debug.debugfs_phydir, sc, &fops_antenna);
851 908
909 sc->debug.debugfs_misc = debugfs_create_file("misc",
910 S_IRUSR,
911 sc->debug.debugfs_phydir, sc, &fops_misc);
912
852 sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors", 913 sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
853 S_IWUSR | S_IRUSR, 914 S_IWUSR | S_IRUSR,
854 sc->debug.debugfs_phydir, sc, 915 sc->debug.debugfs_phydir, sc,
@@ -866,12 +927,6 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
866} 927}
867 928
868void 929void
869ath5k_debug_finish(void)
870{
871 debugfs_remove(ath5k_global_debugfs);
872}
873
874void
875ath5k_debug_finish_device(struct ath5k_softc *sc) 930ath5k_debug_finish_device(struct ath5k_softc *sc)
876{ 931{
877 debugfs_remove(sc->debug.debugfs_debug); 932 debugfs_remove(sc->debug.debugfs_debug);
@@ -879,6 +934,7 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
879 debugfs_remove(sc->debug.debugfs_beacon); 934 debugfs_remove(sc->debug.debugfs_beacon);
880 debugfs_remove(sc->debug.debugfs_reset); 935 debugfs_remove(sc->debug.debugfs_reset);
881 debugfs_remove(sc->debug.debugfs_antenna); 936 debugfs_remove(sc->debug.debugfs_antenna);
937 debugfs_remove(sc->debug.debugfs_misc);
882 debugfs_remove(sc->debug.debugfs_frameerrors); 938 debugfs_remove(sc->debug.debugfs_frameerrors);
883 debugfs_remove(sc->debug.debugfs_ani); 939 debugfs_remove(sc->debug.debugfs_ani);
884 debugfs_remove(sc->debug.debugfs_queue); 940 debugfs_remove(sc->debug.debugfs_queue);
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 9b22722a95f0..236edbd2507d 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -75,6 +75,7 @@ struct ath5k_dbg_info {
75 struct dentry *debugfs_beacon; 75 struct dentry *debugfs_beacon;
76 struct dentry *debugfs_reset; 76 struct dentry *debugfs_reset;
77 struct dentry *debugfs_antenna; 77 struct dentry *debugfs_antenna;
78 struct dentry *debugfs_misc;
78 struct dentry *debugfs_frameerrors; 79 struct dentry *debugfs_frameerrors;
79 struct dentry *debugfs_ani; 80 struct dentry *debugfs_ani;
80 struct dentry *debugfs_queue; 81 struct dentry *debugfs_queue;
@@ -137,15 +138,9 @@ enum ath5k_debug_level {
137 } while (0) 138 } while (0)
138 139
139void 140void
140ath5k_debug_init(void);
141
142void
143ath5k_debug_init_device(struct ath5k_softc *sc); 141ath5k_debug_init_device(struct ath5k_softc *sc);
144 142
145void 143void
146ath5k_debug_finish(void);
147
148void
149ath5k_debug_finish_device(struct ath5k_softc *sc); 144ath5k_debug_finish_device(struct ath5k_softc *sc);
150 145
151void 146void
@@ -173,15 +168,9 @@ ATH5K_DBG_UNLIMIT(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...)
173{} 168{}
174 169
175static inline void 170static inline void
176ath5k_debug_init(void) {}
177
178static inline void
179ath5k_debug_init_device(struct ath5k_softc *sc) {} 171ath5k_debug_init_device(struct ath5k_softc *sc) {}
180 172
181static inline void 173static inline void
182ath5k_debug_finish(void) {}
183
184static inline void
185ath5k_debug_finish_device(struct ath5k_softc *sc) {} 174ath5k_debug_finish_device(struct ath5k_softc *sc) {}
186 175
187static inline void 176static inline void
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 58bb6c5dda7b..923c9ca5c4f0 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -244,7 +244,7 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
244 244
245 /* Force channel idle high */ 245 /* Force channel idle high */
246 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211, 246 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
247 AR5K_DIAG_SW_CHANEL_IDLE_HIGH); 247 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
248 248
249 /* Wait a while and disable mechanism */ 249 /* Wait a while and disable mechanism */
250 udelay(200); 250 udelay(200);
@@ -261,7 +261,7 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
261 } while (--i && pending); 261 } while (--i && pending);
262 262
263 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211, 263 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
264 AR5K_DIAG_SW_CHANEL_IDLE_HIGH); 264 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
265 } 265 }
266 266
267 /* Clear register */ 267 /* Clear register */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index bb2e21553d1b..074b4c644399 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -207,7 +207,8 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
207 */ 207 */
208unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec) 208unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
209{ 209{
210 return usec * ath5k_hw_get_clockrate(ah); 210 struct ath_common *common = ath5k_hw_common(ah);
211 return usec * common->clockrate;
211} 212}
212 213
213/** 214/**
@@ -216,17 +217,19 @@ unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
216 */ 217 */
217unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock) 218unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
218{ 219{
219 return clock / ath5k_hw_get_clockrate(ah); 220 struct ath_common *common = ath5k_hw_common(ah);
221 return clock / common->clockrate;
220} 222}
221 223
222/** 224/**
223 * ath5k_hw_get_clockrate - Get the clock rate for current mode 225 * ath5k_hw_set_clockrate - Set common->clockrate for the current channel
224 * 226 *
225 * @ah: The &struct ath5k_hw 227 * @ah: The &struct ath5k_hw
226 */ 228 */
227unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah) 229void ath5k_hw_set_clockrate(struct ath5k_hw *ah)
228{ 230{
229 struct ieee80211_channel *channel = ah->ah_current_channel; 231 struct ieee80211_channel *channel = ah->ah_current_channel;
232 struct ath_common *common = ath5k_hw_common(ah);
230 int clock; 233 int clock;
231 234
232 if (channel->hw_value & CHANNEL_5GHZ) 235 if (channel->hw_value & CHANNEL_5GHZ)
@@ -240,7 +243,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
240 if (channel->hw_value & CHANNEL_TURBO) 243 if (channel->hw_value & CHANNEL_TURBO)
241 clock *= 2; 244 clock *= 2;
242 245
243 return clock; 246 common->clockrate = clock;
244} 247}
245 248
246/** 249/**
@@ -495,6 +498,10 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
495{ 498{
496 u32 tsf_lower, tsf_upper1, tsf_upper2; 499 u32 tsf_lower, tsf_upper1, tsf_upper2;
497 int i; 500 int i;
501 unsigned long flags;
502
503 /* This code is time critical - we don't want to be interrupted here */
504 local_irq_save(flags);
498 505
499 /* 506 /*
500 * While reading TSF upper and then lower part, the clock is still 507 * While reading TSF upper and then lower part, the clock is still
@@ -517,6 +524,8 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
517 tsf_upper1 = tsf_upper2; 524 tsf_upper1 = tsf_upper2;
518 } 525 }
519 526
527 local_irq_restore(flags);
528
520 WARN_ON( i == ATH5K_MAX_TSF_READ ); 529 WARN_ON( i == ATH5K_MAX_TSF_READ );
521 530
522 return (((u64)tsf_upper1 << 32) | tsf_lower); 531 return (((u64)tsf_upper1 << 32) | tsf_lower);
@@ -600,7 +609,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
600 /* Timer3 marks the end of our ATIM window 609 /* Timer3 marks the end of our ATIM window
601 * a zero length window is not allowed because 610 * a zero length window is not allowed because
602 * we 'll get no beacons */ 611 * we 'll get no beacons */
603 timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1); 612 timer3 = next_beacon + 1;
604 613
605 /* 614 /*
606 * Set the beacon register and enable all timers. 615 * Set the beacon register and enable all timers.
@@ -640,195 +649,95 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
640 649
641} 650}
642 651
643 652/**
644/*********************\ 653 * ath5k_check_timer_win - Check if timer B is timer A + window
645* Key table functions * 654 *
646\*********************/ 655 * @a: timer a (before b)
647 656 * @b: timer b (after a)
648/* 657 * @window: difference between a and b
649 * Reset a key entry on the table 658 * @intval: timers are increased by this interval
659 *
660 * This helper function checks if timer B is timer A + window and covers
661 * cases where timer A or B might have already been updated or wrapped
662 * around (Timers are 16 bit).
663 *
664 * Returns true if O.K.
650 */ 665 */
651int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry) 666static inline bool
667ath5k_check_timer_win(int a, int b, int window, int intval)
652{ 668{
653 unsigned int i, type;
654 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
655
656 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
657
658 type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
659
660 for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
661 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
662
663 /* Reset associated MIC entry if TKIP
664 * is enabled located at offset (entry + 64) */
665 if (type == AR5K_KEYTABLE_TYPE_TKIP) {
666 AR5K_ASSERT_ENTRY(micentry, AR5K_KEYTABLE_SIZE);
667 for (i = 0; i < AR5K_KEYCACHE_SIZE / 2 ; i++)
668 ath5k_hw_reg_write(ah, 0,
669 AR5K_KEYTABLE_OFF(micentry, i));
670 }
671
672 /* 669 /*
673 * Set NULL encryption on AR5212+ 670 * 1.) usually B should be A + window
674 * 671 * 2.) A already updated, B not updated yet
675 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5) 672 * 3.) A already updated and has wrapped around
676 * AR5K_KEYTABLE_TYPE_NULL -> 0x00000007 673 * 4.) B has wrapped around
677 *
678 * Note2: Windows driver (ndiswrapper) sets this to
679 * 0x00000714 instead of 0x00000007
680 */ 674 */
681 if (ah->ah_version >= AR5K_AR5211) { 675 if ((b - a == window) || /* 1.) */
682 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, 676 (a - b == intval - window) || /* 2.) */
683 AR5K_KEYTABLE_TYPE(entry)); 677 ((a | 0x10000) - b == intval - window) || /* 3.) */
684 678 ((b | 0x10000) - a == window)) /* 4.) */
685 if (type == AR5K_KEYTABLE_TYPE_TKIP) { 679 return true; /* O.K. */
686 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, 680 return false;
687 AR5K_KEYTABLE_TYPE(micentry));
688 }
689 }
690
691 return 0;
692}
693
694static
695int ath5k_keycache_type(const struct ieee80211_key_conf *key)
696{
697 switch (key->cipher) {
698 case WLAN_CIPHER_SUITE_TKIP:
699 return AR5K_KEYTABLE_TYPE_TKIP;
700 case WLAN_CIPHER_SUITE_CCMP:
701 return AR5K_KEYTABLE_TYPE_CCM;
702 case WLAN_CIPHER_SUITE_WEP40:
703 return AR5K_KEYTABLE_TYPE_40;
704 case WLAN_CIPHER_SUITE_WEP104:
705 return AR5K_KEYTABLE_TYPE_104;
706 default:
707 return -EINVAL;
708 }
709} 681}
710 682
711/* 683/**
712 * Set a key entry on the table 684 * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
685 *
686 * @ah: The &struct ath5k_hw
687 * @intval: beacon interval
688 *
689 * This is a workaround for IBSS mode:
690 *
691 * The need for this function arises from the fact that we have 4 separate
692 * HW timer registers (TIMER0 - TIMER3), which are closely related to the
693 * next beacon target time (NBTT), and that the HW updates these timers
694 * seperately based on the current TSF value. The hardware increments each
695 * timer by the beacon interval, when the local TSF coverted to TU is equal
696 * to the value stored in the timer.
697 *
698 * The reception of a beacon with the same BSSID can update the local HW TSF
699 * at any time - this is something we can't avoid. If the TSF jumps to a
700 * time which is later than the time stored in a timer, this timer will not
701 * be updated until the TSF in TU wraps around at 16 bit (the size of the
702 * timers) and reaches the time which is stored in the timer.
703 *
704 * The problem is that these timers are closely related to TIMER0 (NBTT) and
705 * that they define a time "window". When the TSF jumps between two timers
706 * (e.g. ATIM and NBTT), the one in the past will be left behind (not
707 * updated), while the one in the future will be updated every beacon
708 * interval. This causes the window to get larger, until the TSF wraps
709 * around as described above and the timer which was left behind gets
710 * updated again. But - because the beacon interval is usually not an exact
711 * divisor of the size of the timers (16 bit), an unwanted "window" between
712 * these timers has developed!
713 *
714 * This is especially important with the ATIM window, because during
715 * the ATIM window only ATIM frames and no data frames are allowed to be
716 * sent, which creates transmission pauses after each beacon. This symptom
717 * has been described as "ramping ping" because ping times increase linearly
718 * for some time and then drop down again. A wrong window on the DMA beacon
719 * timer has the same effect, so we check for these two conditions.
720 *
721 * Returns true if O.K.
713 */ 722 */
714int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, 723bool
715 const struct ieee80211_key_conf *key, const u8 *mac) 724ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
716{
717 unsigned int i;
718 int keylen;
719 __le32 key_v[5] = {};
720 __le32 key0 = 0, key1 = 0;
721 __le32 *rxmic, *txmic;
722 int keytype;
723 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
724 bool is_tkip;
725 const u8 *key_ptr;
726
727 is_tkip = (key->cipher == WLAN_CIPHER_SUITE_TKIP);
728
729 /*
730 * key->keylen comes in from mac80211 in bytes.
731 * TKIP is 128 bit + 128 bit mic
732 */
733 keylen = (is_tkip) ? (128 / 8) : key->keylen;
734
735 if (entry > AR5K_KEYTABLE_SIZE ||
736 (is_tkip && micentry > AR5K_KEYTABLE_SIZE))
737 return -EOPNOTSUPP;
738
739 if (unlikely(keylen > 16))
740 return -EOPNOTSUPP;
741
742 keytype = ath5k_keycache_type(key);
743 if (keytype < 0)
744 return keytype;
745
746 /*
747 * each key block is 6 bytes wide, written as pairs of
748 * alternating 32 and 16 bit le values.
749 */
750 key_ptr = key->key;
751 for (i = 0; keylen >= 6; keylen -= 6) {
752 memcpy(&key_v[i], key_ptr, 6);
753 i += 2;
754 key_ptr += 6;
755 }
756 if (keylen)
757 memcpy(&key_v[i], key_ptr, keylen);
758
759 /* intentionally corrupt key until mic is installed */
760 if (is_tkip) {
761 key0 = key_v[0] = ~key_v[0];
762 key1 = key_v[1] = ~key_v[1];
763 }
764
765 for (i = 0; i < ARRAY_SIZE(key_v); i++)
766 ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
767 AR5K_KEYTABLE_OFF(entry, i));
768
769 ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
770
771 if (is_tkip) {
772 /* Install rx/tx MIC */
773 rxmic = (__le32 *) &key->key[16];
774 txmic = (__le32 *) &key->key[24];
775
776 if (ah->ah_combined_mic) {
777 key_v[0] = rxmic[0];
778 key_v[1] = cpu_to_le32(le32_to_cpu(txmic[0]) >> 16);
779 key_v[2] = rxmic[1];
780 key_v[3] = cpu_to_le32(le32_to_cpu(txmic[0]) & 0xffff);
781 key_v[4] = txmic[1];
782 } else {
783 key_v[0] = rxmic[0];
784 key_v[1] = 0;
785 key_v[2] = rxmic[1];
786 key_v[3] = 0;
787 key_v[4] = 0;
788 }
789 for (i = 0; i < ARRAY_SIZE(key_v); i++)
790 ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
791 AR5K_KEYTABLE_OFF(micentry, i));
792
793 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
794 AR5K_KEYTABLE_TYPE(micentry));
795 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC0(micentry));
796 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC1(micentry));
797
798 /* restore first 2 words of key */
799 ath5k_hw_reg_write(ah, le32_to_cpu(~key0),
800 AR5K_KEYTABLE_OFF(entry, 0));
801 ath5k_hw_reg_write(ah, le32_to_cpu(~key1),
802 AR5K_KEYTABLE_OFF(entry, 1));
803 }
804
805 return ath5k_hw_set_key_lladdr(ah, entry, mac);
806}
807
808int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
809{ 725{
810 u32 low_id, high_id; 726 unsigned int nbtt, atim, dma;
811
812 /* Invalid entry (key table overflow) */
813 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
814 727
815 /* 728 nbtt = ath5k_hw_reg_read(ah, AR5K_TIMER0);
816 * MAC may be NULL if it's a broadcast key. In this case no need to 729 atim = ath5k_hw_reg_read(ah, AR5K_TIMER3);
817 * to compute get_unaligned_le32 and get_unaligned_le16 as we 730 dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3;
818 * already know it.
819 */
820 if (!mac) {
821 low_id = 0xffffffff;
822 high_id = 0xffff | AR5K_KEYTABLE_VALID;
823 } else {
824 low_id = get_unaligned_le32(mac);
825 high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID;
826 }
827 731
828 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry)); 732 /* NOTE: SWBA is different. Having a wrong window there does not
829 ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry)); 733 * stop us from sending data and this condition is catched thru
734 * other means (SWBA interrupt) */
830 735
831 return 0; 736 if (ath5k_check_timer_win(nbtt, atim, 1, intval) &&
737 ath5k_check_timer_win(dma, nbtt, AR5K_TUNE_DMA_BEACON_RESP,
738 intval))
739 return true; /* O.K. */
740 return false;
832} 741}
833 742
834/** 743/**
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 984ba92c7df3..219367884e64 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1093,6 +1093,7 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1093 1093
1094 ah->ah_current_channel = channel; 1094 ah->ah_current_channel = channel;
1095 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false; 1095 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
1096 ath5k_hw_set_clockrate(ah);
1096 1097
1097 return 0; 1098 return 0;
1098} 1099}
@@ -1257,7 +1258,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1257 * Disable beacons and RX/TX queues, wait 1258 * Disable beacons and RX/TX queues, wait
1258 */ 1259 */
1259 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5210, 1260 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5210,
1260 AR5K_DIAG_SW_DIS_TX | AR5K_DIAG_SW_DIS_RX_5210); 1261 AR5K_DIAG_SW_DIS_TX_5210 | AR5K_DIAG_SW_DIS_RX_5210);
1261 beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210); 1262 beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
1262 ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210); 1263 ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
1263 1264
@@ -1336,7 +1337,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1336 * Re-enable RX/TX and beacons 1337 * Re-enable RX/TX and beacons
1337 */ 1338 */
1338 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5210, 1339 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5210,
1339 AR5K_DIAG_SW_DIS_TX | AR5K_DIAG_SW_DIS_RX_5210); 1340 AR5K_DIAG_SW_DIS_TX_5210 | AR5K_DIAG_SW_DIS_RX_5210);
1340 ath5k_hw_reg_write(ah, beacon, AR5K_BEACON_5210); 1341 ath5k_hw_reg_write(ah, beacon, AR5K_BEACON_5210);
1341 1342
1342 return 0; 1343 return 0;
@@ -1377,7 +1378,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
1377 1378
1378 /* protect against divide by 0 and loss of sign bits */ 1379 /* protect against divide by 0 and loss of sign bits */
1379 if (i_coffd == 0 || q_coffd < 2) 1380 if (i_coffd == 0 || q_coffd < 2)
1380 return -1; 1381 return 0;
1381 1382
1382 i_coff = (-iq_corr) / i_coffd; 1383 i_coff = (-iq_corr) / i_coffd;
1383 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ 1384 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 4186ff4c6e9c..84c717ded1c5 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -36,24 +36,58 @@ int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
36} 36}
37 37
38/* 38/*
39 * Make sure cw is a power of 2 minus 1 and smaller than 1024
40 */
41static u16 ath5k_cw_validate(u16 cw_req)
42{
43 u32 cw = 1;
44 cw_req = min(cw_req, (u16)1023);
45
46 while (cw < cw_req)
47 cw = (cw << 1) | 1;
48
49 return cw;
50}
51
52/*
39 * Set properties for a transmit queue 53 * Set properties for a transmit queue
40 */ 54 */
41int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, 55int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
42 const struct ath5k_txq_info *queue_info) 56 const struct ath5k_txq_info *qinfo)
43{ 57{
58 struct ath5k_txq_info *qi;
59
44 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 60 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
45 61
46 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) 62 qi = &ah->ah_txq[queue];
63
64 if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
47 return -EIO; 65 return -EIO;
48 66
49 memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info)); 67 /* copy and validate values */
68 qi->tqi_type = qinfo->tqi_type;
69 qi->tqi_subtype = qinfo->tqi_subtype;
70 qi->tqi_flags = qinfo->tqi_flags;
71 /*
72 * According to the docs: Although the AIFS field is 8 bit wide,
73 * the maximum supported value is 0xFC. Setting it higher than that
74 * will cause the DCU to hang.
75 */
76 qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
77 qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
78 qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
79 qi->tqi_cbr_period = qinfo->tqi_cbr_period;
80 qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
81 qi->tqi_burst_time = qinfo->tqi_burst_time;
82 qi->tqi_ready_time = qinfo->tqi_ready_time;
50 83
51 /*XXX: Is this supported on 5210 ?*/ 84 /*XXX: Is this supported on 5210 ?*/
52 if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA && 85 /*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
53 ((queue_info->tqi_subtype == AR5K_WME_AC_VI) || 86 if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
54 (queue_info->tqi_subtype == AR5K_WME_AC_VO))) || 87 ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
55 queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD) 88 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
56 ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS; 89 qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
90 qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
57 91
58 return 0; 92 return 0;
59} 93}
@@ -186,7 +220,7 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
186 */ 220 */
187int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) 221int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
188{ 222{
189 u32 cw_min, cw_max, retry_lg, retry_sh; 223 u32 retry_lg, retry_sh;
190 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; 224 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
191 225
192 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); 226 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -217,14 +251,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
217 /* Set IFS0 */ 251 /* Set IFS0 */
218 if (ah->ah_turbo) { 252 if (ah->ah_turbo) {
219 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO + 253 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
220 (ah->ah_aifs + tq->tqi_aifs) * 254 tq->tqi_aifs * AR5K_INIT_SLOT_TIME_TURBO) <<
221 AR5K_INIT_SLOT_TIME_TURBO) <<
222 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO, 255 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
223 AR5K_IFS0); 256 AR5K_IFS0);
224 } else { 257 } else {
225 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS + 258 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
226 (ah->ah_aifs + tq->tqi_aifs) * 259 tq->tqi_aifs * AR5K_INIT_SLOT_TIME) <<
227 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) | 260 AR5K_IFS0_DIFS_S) |
228 AR5K_INIT_SIFS, AR5K_IFS0); 261 AR5K_INIT_SIFS, AR5K_IFS0);
229 } 262 }
230 263
@@ -248,35 +281,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
248 } 281 }
249 282
250 /* 283 /*
251 * Calculate cwmin/max by channel mode
252 */
253 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
254 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
255 ah->ah_aifs = AR5K_TUNE_AIFS;
256 /*XR is only supported on 5212*/
257 if (IS_CHAN_XR(ah->ah_current_channel) &&
258 ah->ah_version == AR5K_AR5212) {
259 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
260 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
261 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
262 /*B mode is not supported on 5210*/
263 } else if (IS_CHAN_B(ah->ah_current_channel) &&
264 ah->ah_version != AR5K_AR5210) {
265 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
266 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
267 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
268 }
269
270 cw_min = 1;
271 while (cw_min < ah->ah_cw_min)
272 cw_min = (cw_min << 1) | 1;
273
274 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
275 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
276 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
277 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
278
279 /*
280 * Calculate and set retry limits 284 * Calculate and set retry limits
281 */ 285 */
282 if (ah->ah_software_retry) { 286 if (ah->ah_software_retry) {
@@ -292,7 +296,7 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
292 /*No QCU/DCU [5210]*/ 296 /*No QCU/DCU [5210]*/
293 if (ah->ah_version == AR5K_AR5210) { 297 if (ah->ah_version == AR5K_AR5210) {
294 ath5k_hw_reg_write(ah, 298 ath5k_hw_reg_write(ah,
295 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) 299 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
296 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 300 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
297 AR5K_NODCU_RETRY_LMT_SLG_RETRY) 301 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
298 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY, 302 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
@@ -314,14 +318,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
314 /*===Rest is also for QCU/DCU only [5211+]===*/ 318 /*===Rest is also for QCU/DCU only [5211+]===*/
315 319
316 /* 320 /*
317 * Set initial content window (cw_min/cw_max) 321 * Set contention window (cw_min/cw_max)
318 * and arbitrated interframe space (aifs)... 322 * and arbitrated interframe space (aifs)...
319 */ 323 */
320 ath5k_hw_reg_write(ah, 324 ath5k_hw_reg_write(ah,
321 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) | 325 AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
322 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) | 326 AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
323 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs, 327 AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
324 AR5K_DCU_LCL_IFS_AIFS),
325 AR5K_QUEUE_DFS_LOCAL_IFS(queue)); 328 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
326 329
327 /* 330 /*
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 05ef587ad2b4..a34929f06533 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1387,10 +1387,9 @@
1387 1387
1388 1388
1389/* 1389/*
1390 * PCU control register 1390 * PCU Diagnostic register
1391 * 1391 *
1392 * Only DIS_RX is used in the code, the rest i guess are 1392 * Used for tweaking/diagnostics.
1393 * for tweaking/diagnostics.
1394 */ 1393 */
1395#define AR5K_DIAG_SW_5210 0x8068 /* Register Address [5210] */ 1394#define AR5K_DIAG_SW_5210 0x8068 /* Register Address [5210] */
1396#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */ 1395#define AR5K_DIAG_SW_5211 0x8048 /* Register Address [5211+] */
@@ -1399,22 +1398,22 @@
1399#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */ 1398#define AR5K_DIAG_SW_DIS_WEP_ACK 0x00000001 /* Disable ACKs if WEP key is invalid */
1400#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */ 1399#define AR5K_DIAG_SW_DIS_ACK 0x00000002 /* Disable ACKs */
1401#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */ 1400#define AR5K_DIAG_SW_DIS_CTS 0x00000004 /* Disable CTSs */
1402#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable encryption */ 1401#define AR5K_DIAG_SW_DIS_ENC 0x00000008 /* Disable HW encryption */
1403#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable decryption */ 1402#define AR5K_DIAG_SW_DIS_DEC 0x00000010 /* Disable HW decryption */
1404#define AR5K_DIAG_SW_DIS_TX 0x00000020 /* Disable transmit [5210] */ 1403#define AR5K_DIAG_SW_DIS_TX_5210 0x00000020 /* Disable transmit [5210] */
1405#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable recieve */ 1404#define AR5K_DIAG_SW_DIS_RX_5210 0x00000040 /* Disable receive */
1406#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020 1405#define AR5K_DIAG_SW_DIS_RX_5211 0x00000020
1407#define AR5K_DIAG_SW_DIS_RX (ah->ah_version == AR5K_AR5210 ? \ 1406#define AR5K_DIAG_SW_DIS_RX (ah->ah_version == AR5K_AR5210 ? \
1408 AR5K_DIAG_SW_DIS_RX_5210 : AR5K_DIAG_SW_DIS_RX_5211) 1407 AR5K_DIAG_SW_DIS_RX_5210 : AR5K_DIAG_SW_DIS_RX_5211)
1409#define AR5K_DIAG_SW_LOOP_BACK_5210 0x00000080 /* Loopback (i guess it goes with DIS_TX) [5210] */ 1408#define AR5K_DIAG_SW_LOOP_BACK_5210 0x00000080 /* TX Data Loopback (i guess it goes with DIS_TX) [5210] */
1410#define AR5K_DIAG_SW_LOOP_BACK_5211 0x00000040 1409#define AR5K_DIAG_SW_LOOP_BACK_5211 0x00000040
1411#define AR5K_DIAG_SW_LOOP_BACK (ah->ah_version == AR5K_AR5210 ? \ 1410#define AR5K_DIAG_SW_LOOP_BACK (ah->ah_version == AR5K_AR5210 ? \
1412 AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211) 1411 AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211)
1413#define AR5K_DIAG_SW_CORR_FCS_5210 0x00000100 /* Corrupted FCS */ 1412#define AR5K_DIAG_SW_CORR_FCS_5210 0x00000100 /* Generate invalid TX FCS */
1414#define AR5K_DIAG_SW_CORR_FCS_5211 0x00000080 1413#define AR5K_DIAG_SW_CORR_FCS_5211 0x00000080
1415#define AR5K_DIAG_SW_CORR_FCS (ah->ah_version == AR5K_AR5210 ? \ 1414#define AR5K_DIAG_SW_CORR_FCS (ah->ah_version == AR5K_AR5210 ? \
1416 AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211) 1415 AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211)
1417#define AR5K_DIAG_SW_CHAN_INFO_5210 0x00000200 /* Dump channel info */ 1416#define AR5K_DIAG_SW_CHAN_INFO_5210 0x00000200 /* Add 56 bytes of channel info before the frame data in the RX buffer */
1418#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100 1417#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
1419#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \ 1418#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
1420 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211) 1419 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
@@ -1426,17 +1425,17 @@
1426#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */ 1425#define AR5K_DIAG_SW_SCVRAM_SEED 0x0003f800 /* [5210] */
1427#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */ 1426#define AR5K_DIAG_SW_SCRAM_SEED_M 0x0001fc00 /* Scrambler seed mask */
1428#define AR5K_DIAG_SW_SCRAM_SEED_S 10 1427#define AR5K_DIAG_SW_SCRAM_SEED_S 10
1429#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */ 1428#define AR5K_DIAG_SW_DIS_SEQ_INC_5210 0x00040000 /* Disable seqnum increment (?)[5210] */
1430#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000 1429#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
1431#define AR5K_DIAG_SW_FRAME_NV0_5211 0x00020000 /* Accept frames of non-zero protocol number */ 1430#define AR5K_DIAG_SW_FRAME_NV0_5211 0x00020000 /* Accept frames of non-zero protocol number */
1432#define AR5K_DIAG_SW_FRAME_NV0 (ah->ah_version == AR5K_AR5210 ? \ 1431#define AR5K_DIAG_SW_FRAME_NV0 (ah->ah_version == AR5K_AR5210 ? \
1433 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211) 1432 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
1434#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 /* Observation point select (?) */ 1433#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 /* Observation point select (?) */
1435#define AR5K_DIAG_SW_OBSPT_S 18 1434#define AR5K_DIAG_SW_OBSPT_S 18
1436#define AR5K_DIAG_SW_RX_CLEAR_HIGH 0x0010000 /* Force RX Clear high */ 1435#define AR5K_DIAG_SW_RX_CLEAR_HIGH 0x00100000 /* Ignore carrier sense */
1437#define AR5K_DIAG_SW_IGNORE_CARR_SENSE 0x0020000 /* Ignore virtual carrier sense */ 1436#define AR5K_DIAG_SW_IGNORE_CARR_SENSE 0x00200000 /* Ignore virtual carrier sense */
1438#define AR5K_DIAG_SW_CHANEL_IDLE_HIGH 0x0040000 /* Force channel idle high */ 1437#define AR5K_DIAG_SW_CHANNEL_IDLE_HIGH 0x00400000 /* Force channel idle high */
1439#define AR5K_DIAG_SW_PHEAR_ME 0x0080000 /* ??? */ 1438#define AR5K_DIAG_SW_PHEAR_ME 0x00800000 /* ??? */
1440 1439
1441/* 1440/*
1442 * TSF (clock) register (lower 32 bits) 1441 * TSF (clock) register (lower 32 bits)
@@ -1822,50 +1821,8 @@
1822 1821
1823/*===5212 end===*/ 1822/*===5212 end===*/
1824 1823
1825/*
1826 * Key table (WEP) register
1827 */
1828#define AR5K_KEYTABLE_0_5210 0x9000
1829#define AR5K_KEYTABLE_0_5211 0x8800
1830#define AR5K_KEYTABLE_5210(_n) (AR5K_KEYTABLE_0_5210 + ((_n) << 5))
1831#define AR5K_KEYTABLE_5211(_n) (AR5K_KEYTABLE_0_5211 + ((_n) << 5))
1832#define AR5K_KEYTABLE(_n) (ah->ah_version == AR5K_AR5210 ? \
1833 AR5K_KEYTABLE_5210(_n) : AR5K_KEYTABLE_5211(_n))
1834#define AR5K_KEYTABLE_OFF(_n, x) (AR5K_KEYTABLE(_n) + (x << 2))
1835#define AR5K_KEYTABLE_TYPE(_n) AR5K_KEYTABLE_OFF(_n, 5)
1836#define AR5K_KEYTABLE_TYPE_40 0x00000000
1837#define AR5K_KEYTABLE_TYPE_104 0x00000001
1838#define AR5K_KEYTABLE_TYPE_128 0x00000003
1839#define AR5K_KEYTABLE_TYPE_TKIP 0x00000004 /* [5212+] */
1840#define AR5K_KEYTABLE_TYPE_AES 0x00000005 /* [5211+] */
1841#define AR5K_KEYTABLE_TYPE_CCM 0x00000006 /* [5212+] */
1842#define AR5K_KEYTABLE_TYPE_NULL 0x00000007 /* [5211+] */
1843#define AR5K_KEYTABLE_ANTENNA 0x00000008 /* [5212+] */
1844#define AR5K_KEYTABLE_MAC0(_n) AR5K_KEYTABLE_OFF(_n, 6)
1845#define AR5K_KEYTABLE_MAC1(_n) AR5K_KEYTABLE_OFF(_n, 7)
1846#define AR5K_KEYTABLE_VALID 0x00008000
1847
1848/* If key type is TKIP and MIC is enabled
1849 * MIC key goes in offset entry + 64 */
1850#define AR5K_KEYTABLE_MIC_OFFSET 64
1851
1852/* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit
1853 * WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit
1854 * WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit
1855 *
1856 * Some vendors have introduced bigger WEP keys to address
1857 * security vulnerabilities in WEP. This includes:
1858 *
1859 * WEP 232-bit = 232-bit entered key + 24 bit IV = 256-bit
1860 *
1861 * We can expand this if we find ar5k Atheros cards with a larger
1862 * key table size.
1863 */
1864#define AR5K_KEYTABLE_SIZE_5210 64 1824#define AR5K_KEYTABLE_SIZE_5210 64
1865#define AR5K_KEYTABLE_SIZE_5211 128 1825#define AR5K_KEYTABLE_SIZE_5211 128
1866#define AR5K_KEYTABLE_SIZE (ah->ah_version == AR5K_AR5210 ? \
1867 AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211)
1868
1869 1826
1870/*===PHY REGISTERS===*/ 1827/*===PHY REGISTERS===*/
1871 1828
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 58912cd762d9..5b179d01f97d 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -167,7 +167,7 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
167 * ieee80211_duration() for a brief description of 167 * ieee80211_duration() for a brief description of
168 * what rate we should choose to TX ACKs. */ 168 * what rate we should choose to TX ACKs. */
169 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw, 169 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
170 sc->vif, 10, rate)); 170 NULL, 10, rate));
171 171
172 ath5k_hw_reg_write(ah, tx_time, reg); 172 ath5k_hw_reg_write(ah, tx_time, reg);
173 173
@@ -1060,7 +1060,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1060 * XXX: rethink this after new mode changes to 1060 * XXX: rethink this after new mode changes to
1061 * mac80211 are integrated */ 1061 * mac80211 are integrated */
1062 if (ah->ah_version == AR5K_AR5212 && 1062 if (ah->ah_version == AR5K_AR5212 &&
1063 ah->ah_sc->vif != NULL) 1063 ah->ah_sc->nvifs)
1064 ath5k_hw_write_rate_duration(ah, mode); 1064 ath5k_hw_write_rate_duration(ah, mode);
1065 1065
1066 /* 1066 /*
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 35f23bdc442f..ad57a6d23110 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,6 +32,14 @@ config ATH9K_DEBUGFS
32 32
33 Also required for changing debug message flags at run time. 33 Also required for changing debug message flags at run time.
34 34
35config ATH9K_RATE_CONTROL
36 bool "Atheros ath9k rate control"
37 depends on ATH9K
38 default y
39 ---help---
40 Say Y, if you want to use the ath9k specific rate control
41 module instead of minstrel_ht.
42
35config ATH9K_HTC 43config ATH9K_HTC
36 tristate "Atheros HTC based wireless cards support" 44 tristate "Atheros HTC based wireless cards support"
37 depends on USB && MAC80211 45 depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4555e9983903..aca01621c205 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -5,8 +5,8 @@ ath9k-y += beacon.o \
5 recv.o \ 5 recv.o \
6 xmit.o \ 6 xmit.o \
7 virtual.o \ 7 virtual.o \
8 rc.o
9 8
9ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
10ath9k-$(CONFIG_PCI) += pci.o 10ath9k-$(CONFIG_PCI) += pci.o
11ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o 11ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o
12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index cc648b6ae31c..63ccb39cdcd4 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
17#include "hw.h" 18#include "hw.h"
18#include "hw-ops.h" 19#include "hw-ops.h"
19 20
@@ -48,7 +49,7 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
48 { 7, 8, 0 } /* lvl 9 */ 49 { 7, 8, 0 } /* lvl 9 */
49}; 50};
50#define ATH9K_ANI_OFDM_NUM_LEVEL \ 51#define ATH9K_ANI_OFDM_NUM_LEVEL \
51 (sizeof(ofdm_level_table)/sizeof(ofdm_level_table[0])) 52 ARRAY_SIZE(ofdm_level_table)
52#define ATH9K_ANI_OFDM_MAX_LEVEL \ 53#define ATH9K_ANI_OFDM_MAX_LEVEL \
53 (ATH9K_ANI_OFDM_NUM_LEVEL-1) 54 (ATH9K_ANI_OFDM_NUM_LEVEL-1)
54#define ATH9K_ANI_OFDM_DEF_LEVEL \ 55#define ATH9K_ANI_OFDM_DEF_LEVEL \
@@ -94,7 +95,7 @@ static const struct ani_cck_level_entry cck_level_table[] = {
94}; 95};
95 96
96#define ATH9K_ANI_CCK_NUM_LEVEL \ 97#define ATH9K_ANI_CCK_NUM_LEVEL \
97 (sizeof(cck_level_table)/sizeof(cck_level_table[0])) 98 ARRAY_SIZE(cck_level_table)
98#define ATH9K_ANI_CCK_MAX_LEVEL \ 99#define ATH9K_ANI_CCK_MAX_LEVEL \
99 (ATH9K_ANI_CCK_NUM_LEVEL-1) 100 (ATH9K_ANI_CCK_NUM_LEVEL-1)
100#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \ 101#define ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI \
@@ -102,31 +103,9 @@ static const struct ani_cck_level_entry cck_level_table[] = {
102#define ATH9K_ANI_CCK_DEF_LEVEL \ 103#define ATH9K_ANI_CCK_DEF_LEVEL \
103 2 /* default level - matches the INI settings */ 104 2 /* default level - matches the INI settings */
104 105
105/* Private to ani.c */ 106static bool use_new_ani(struct ath_hw *ah)
106static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
107{ 107{
108 ath9k_hw_private_ops(ah)->ani_lower_immunity(ah); 108 return AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani;
109}
110
111int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
112 struct ath9k_channel *chan)
113{
114 int i;
115
116 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
117 if (ah->ani[i].c &&
118 ah->ani[i].c->channel == chan->channel)
119 return i;
120 if (ah->ani[i].c == NULL) {
121 ah->ani[i].c = chan;
122 return i;
123 }
124 }
125
126 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI,
127 "No more channel states left. Using channel 0\n");
128
129 return 0;
130} 109}
131 110
132static void ath9k_hw_update_mibstats(struct ath_hw *ah, 111static void ath9k_hw_update_mibstats(struct ath_hw *ah,
@@ -139,82 +118,34 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
139 stats->beacons += REG_READ(ah, AR_BEACON_CNT); 118 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
140} 119}
141 120
142static void ath9k_ani_restart_old(struct ath_hw *ah) 121static void ath9k_ani_restart(struct ath_hw *ah)
143{ 122{
144 struct ar5416AniState *aniState; 123 struct ar5416AniState *aniState;
145 struct ath_common *common = ath9k_hw_common(ah); 124 struct ath_common *common = ath9k_hw_common(ah);
125 u32 ofdm_base = 0, cck_base = 0;
146 126
147 if (!DO_ANI(ah)) 127 if (!DO_ANI(ah))
148 return; 128 return;
149 129
150 aniState = ah->curani; 130 aniState = &ah->curchan->ani;
151 aniState->listenTime = 0; 131 aniState->listenTime = 0;
152 132
153 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) { 133 if (!use_new_ani(ah)) {
154 aniState->ofdmPhyErrBase = 0; 134 ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
155 ath_print(common, ATH_DBG_ANI, 135 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
156 "OFDM Trigger is too high for hw counters\n");
157 } else {
158 aniState->ofdmPhyErrBase =
159 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
160 }
161 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
162 aniState->cckPhyErrBase = 0;
163 ath_print(common, ATH_DBG_ANI,
164 "CCK Trigger is too high for hw counters\n");
165 } else {
166 aniState->cckPhyErrBase =
167 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
168 } 136 }
169 ath_print(common, ATH_DBG_ANI,
170 "Writing ofdmbase=%u cckbase=%u\n",
171 aniState->ofdmPhyErrBase,
172 aniState->cckPhyErrBase);
173
174 ENABLE_REGWRITE_BUFFER(ah);
175
176 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
177 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
178 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
179 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
180
181 REGWRITE_BUFFER_FLUSH(ah);
182 DISABLE_REGWRITE_BUFFER(ah);
183
184 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
185
186 aniState->ofdmPhyErrCount = 0;
187 aniState->cckPhyErrCount = 0;
188}
189
190static void ath9k_ani_restart_new(struct ath_hw *ah)
191{
192 struct ar5416AniState *aniState;
193 struct ath_common *common = ath9k_hw_common(ah);
194
195 if (!DO_ANI(ah))
196 return;
197
198 aniState = ah->curani;
199 aniState->listenTime = 0;
200
201 aniState->ofdmPhyErrBase = 0;
202 aniState->cckPhyErrBase = 0;
203 137
204 ath_print(common, ATH_DBG_ANI, 138 ath_print(common, ATH_DBG_ANI,
205 "Writing ofdmbase=%08x cckbase=%08x\n", 139 "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
206 aniState->ofdmPhyErrBase,
207 aniState->cckPhyErrBase);
208 140
209 ENABLE_REGWRITE_BUFFER(ah); 141 ENABLE_REGWRITE_BUFFER(ah);
210 142
211 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); 143 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
212 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); 144 REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
213 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 145 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
214 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 146 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
215 147
216 REGWRITE_BUFFER_FLUSH(ah); 148 REGWRITE_BUFFER_FLUSH(ah);
217 DISABLE_REGWRITE_BUFFER(ah);
218 149
219 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 150 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
220 151
@@ -228,10 +159,7 @@ static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
228 struct ar5416AniState *aniState; 159 struct ar5416AniState *aniState;
229 int32_t rssi; 160 int32_t rssi;
230 161
231 if (!DO_ANI(ah)) 162 aniState = &ah->curchan->ani;
232 return;
233
234 aniState = ah->curani;
235 163
236 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { 164 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
237 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 165 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
@@ -300,10 +228,7 @@ static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
300 struct ar5416AniState *aniState; 228 struct ar5416AniState *aniState;
301 int32_t rssi; 229 int32_t rssi;
302 230
303 if (!DO_ANI(ah)) 231 aniState = &ah->curchan->ani;
304 return;
305
306 aniState = ah->curani;
307 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { 232 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
308 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 233 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
309 aniState->noiseImmunityLevel + 1)) { 234 aniState->noiseImmunityLevel + 1)) {
@@ -335,7 +260,7 @@ static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
335/* Adjust the OFDM Noise Immunity Level */ 260/* Adjust the OFDM Noise Immunity Level */
336static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel) 261static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
337{ 262{
338 struct ar5416AniState *aniState = ah->curani; 263 struct ar5416AniState *aniState = &ah->curchan->ani;
339 struct ath_common *common = ath9k_hw_common(ah); 264 struct ath_common *common = ath9k_hw_common(ah);
340 const struct ani_ofdm_level_entry *entry_ofdm; 265 const struct ani_ofdm_level_entry *entry_ofdm;
341 const struct ani_cck_level_entry *entry_cck; 266 const struct ani_cck_level_entry *entry_cck;
@@ -380,14 +305,19 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
380 } 305 }
381} 306}
382 307
383static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah) 308static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
384{ 309{
385 struct ar5416AniState *aniState; 310 struct ar5416AniState *aniState;
386 311
387 if (!DO_ANI(ah)) 312 if (!DO_ANI(ah))
388 return; 313 return;
389 314
390 aniState = ah->curani; 315 if (!use_new_ani(ah)) {
316 ath9k_hw_ani_ofdm_err_trigger_old(ah);
317 return;
318 }
319
320 aniState = &ah->curchan->ani;
391 321
392 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) 322 if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
393 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1); 323 ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
@@ -398,7 +328,7 @@ static void ath9k_hw_ani_ofdm_err_trigger_new(struct ath_hw *ah)
398 */ 328 */
399static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel) 329static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
400{ 330{
401 struct ar5416AniState *aniState = ah->curani; 331 struct ar5416AniState *aniState = &ah->curchan->ani;
402 struct ath_common *common = ath9k_hw_common(ah); 332 struct ath_common *common = ath9k_hw_common(ah);
403 const struct ani_ofdm_level_entry *entry_ofdm; 333 const struct ani_ofdm_level_entry *entry_ofdm;
404 const struct ani_cck_level_entry *entry_cck; 334 const struct ani_cck_level_entry *entry_cck;
@@ -437,14 +367,19 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
437 entry_cck->mrc_cck_on); 367 entry_cck->mrc_cck_on);
438} 368}
439 369
440static void ath9k_hw_ani_cck_err_trigger_new(struct ath_hw *ah) 370static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
441{ 371{
442 struct ar5416AniState *aniState; 372 struct ar5416AniState *aniState;
443 373
444 if (!DO_ANI(ah)) 374 if (!DO_ANI(ah))
445 return; 375 return;
446 376
447 aniState = ah->curani; 377 if (!use_new_ani(ah)) {
378 ath9k_hw_ani_cck_err_trigger_old(ah);
379 return;
380 }
381
382 aniState = &ah->curchan->ani;
448 383
449 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) 384 if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
450 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1); 385 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
@@ -455,7 +390,7 @@ static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
455 struct ar5416AniState *aniState; 390 struct ar5416AniState *aniState;
456 int32_t rssi; 391 int32_t rssi;
457 392
458 aniState = ah->curani; 393 aniState = &ah->curchan->ani;
459 394
460 if (ah->opmode == NL80211_IFTYPE_AP) { 395 if (ah->opmode == NL80211_IFTYPE_AP) {
461 if (aniState->firstepLevel > 0) { 396 if (aniState->firstepLevel > 0) {
@@ -507,11 +442,16 @@ static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
507 * only lower either OFDM or CCK errors per turn 442 * only lower either OFDM or CCK errors per turn
508 * we lower the other one next time 443 * we lower the other one next time
509 */ 444 */
510static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah) 445static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
511{ 446{
512 struct ar5416AniState *aniState; 447 struct ar5416AniState *aniState;
513 448
514 aniState = ah->curani; 449 aniState = &ah->curchan->ani;
450
451 if (!use_new_ani(ah)) {
452 ath9k_hw_ani_lower_immunity_old(ah);
453 return;
454 }
515 455
516 /* lower OFDM noise immunity */ 456 /* lower OFDM noise immunity */
517 if (aniState->ofdmNoiseImmunityLevel > 0 && 457 if (aniState->ofdmNoiseImmunityLevel > 0 &&
@@ -525,87 +465,18 @@ static void ath9k_hw_ani_lower_immunity_new(struct ath_hw *ah)
525 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1); 465 ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
526} 466}
527 467
528static u8 ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah)
529{
530 struct ath9k_channel *chan = ah->curchan;
531 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
532 u8 clockrate; /* in MHz */
533
534 if (!ah->curchan) /* should really check for CCK instead */
535 clockrate = ATH9K_CLOCK_RATE_CCK;
536 else if (conf->channel->band == IEEE80211_BAND_2GHZ)
537 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
538 else if (IS_CHAN_A_FAST_CLOCK(ah, chan))
539 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
540 else
541 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
542
543 if (conf_is_ht40(conf))
544 return clockrate * 2;
545
546 return clockrate * 2;
547}
548
549static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah)
550{
551 struct ar5416AniState *aniState;
552 struct ath_common *common = ath9k_hw_common(ah);
553 u32 txFrameCount, rxFrameCount, cycleCount;
554 int32_t listenTime;
555
556 txFrameCount = REG_READ(ah, AR_TFCNT);
557 rxFrameCount = REG_READ(ah, AR_RFCNT);
558 cycleCount = REG_READ(ah, AR_CCCNT);
559
560 aniState = ah->curani;
561 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
562 listenTime = 0;
563 ah->stats.ast_ani_lzero++;
564 ath_print(common, ATH_DBG_ANI,
565 "1st call: aniState->cycleCount=%d\n",
566 aniState->cycleCount);
567 } else {
568 int32_t ccdelta = cycleCount - aniState->cycleCount;
569 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
570 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
571 int32_t clock_rate;
572
573 /*
574 * convert HW counter values to ms using mode
575 * specifix clock rate
576 */
577 clock_rate = ath9k_hw_chan_2_clockrate_mhz(ah) * 1000;;
578
579 listenTime = (ccdelta - rfdelta - tfdelta) / clock_rate;
580
581 ath_print(common, ATH_DBG_ANI,
582 "cyclecount=%d, rfcount=%d, "
583 "tfcount=%d, listenTime=%d CLOCK_RATE=%d\n",
584 ccdelta, rfdelta, tfdelta, listenTime, clock_rate);
585 }
586
587 aniState->cycleCount = cycleCount;
588 aniState->txFrameCount = txFrameCount;
589 aniState->rxFrameCount = rxFrameCount;
590
591 return listenTime;
592}
593
594static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning) 468static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
595{ 469{
596 struct ar5416AniState *aniState; 470 struct ar5416AniState *aniState;
597 struct ath9k_channel *chan = ah->curchan; 471 struct ath9k_channel *chan = ah->curchan;
598 struct ath_common *common = ath9k_hw_common(ah); 472 struct ath_common *common = ath9k_hw_common(ah);
599 int index;
600 473
601 if (!DO_ANI(ah)) 474 if (!DO_ANI(ah))
602 return; 475 return;
603 476
604 index = ath9k_hw_get_ani_channel_idx(ah, chan); 477 aniState = &ah->curchan->ani;
605 aniState = &ah->ani[index];
606 ah->curani = aniState;
607 478
608 if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION 479 if (ah->opmode != NL80211_IFTYPE_STATION
609 && ah->opmode != NL80211_IFTYPE_ADHOC) { 480 && ah->opmode != NL80211_IFTYPE_ADHOC) {
610 ath_print(common, ATH_DBG_ANI, 481 ath_print(common, ATH_DBG_ANI,
611 "Reset ANI state opmode %u\n", ah->opmode); 482 "Reset ANI state opmode %u\n", ah->opmode);
@@ -634,17 +505,7 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
634 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) | 505 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) |
635 ATH9K_RX_FILTER_PHYERR); 506 ATH9K_RX_FILTER_PHYERR);
636 507
637 if (ah->opmode == NL80211_IFTYPE_AP) { 508 ath9k_ani_restart(ah);
638 ah->curani->ofdmTrigHigh =
639 ah->config.ofdm_trig_high;
640 ah->curani->ofdmTrigLow =
641 ah->config.ofdm_trig_low;
642 ah->curani->cckTrigHigh =
643 ah->config.cck_trig_high;
644 ah->curani->cckTrigLow =
645 ah->config.cck_trig_low;
646 }
647 ath9k_ani_restart_old(ah);
648 return; 509 return;
649 } 510 }
650 511
@@ -666,7 +527,7 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
666 527
667 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & 528 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
668 ~ATH9K_RX_FILTER_PHYERR); 529 ~ATH9K_RX_FILTER_PHYERR);
669 ath9k_ani_restart_old(ah); 530 ath9k_ani_restart(ah);
670 531
671 ENABLE_REGWRITE_BUFFER(ah); 532 ENABLE_REGWRITE_BUFFER(ah);
672 533
@@ -674,7 +535,6 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
674 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 535 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
675 536
676 REGWRITE_BUFFER_FLUSH(ah); 537 REGWRITE_BUFFER_FLUSH(ah);
677 DISABLE_REGWRITE_BUFFER(ah);
678} 538}
679 539
680/* 540/*
@@ -682,15 +542,18 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
682 * This routine should be called for every hardware reset and for 542 * This routine should be called for every hardware reset and for
683 * every channel change. 543 * every channel change.
684 */ 544 */
685static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning) 545void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
686{ 546{
687 struct ar5416AniState *aniState = ah->curani; 547 struct ar5416AniState *aniState = &ah->curchan->ani;
688 struct ath9k_channel *chan = ah->curchan; 548 struct ath9k_channel *chan = ah->curchan;
689 struct ath_common *common = ath9k_hw_common(ah); 549 struct ath_common *common = ath9k_hw_common(ah);
690 550
691 if (!DO_ANI(ah)) 551 if (!DO_ANI(ah))
692 return; 552 return;
693 553
554 if (!use_new_ani(ah))
555 return ath9k_ani_reset_old(ah, is_scanning);
556
694 BUG_ON(aniState == NULL); 557 BUG_ON(aniState == NULL);
695 ah->stats.ast_ani_reset++; 558 ah->stats.ast_ani_reset++;
696 559
@@ -760,7 +623,7 @@ static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning)
760 * enable phy counters if hw supports or if not, enable phy 623 * enable phy counters if hw supports or if not, enable phy
761 * interrupts (so we can count each one) 624 * interrupts (so we can count each one)
762 */ 625 */
763 ath9k_ani_restart_new(ah); 626 ath9k_ani_restart(ah);
764 627
765 ENABLE_REGWRITE_BUFFER(ah); 628 ENABLE_REGWRITE_BUFFER(ah);
766 629
@@ -768,28 +631,30 @@ static void ath9k_ani_reset_new(struct ath_hw *ah, bool is_scanning)
768 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 631 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
769 632
770 REGWRITE_BUFFER_FLUSH(ah); 633 REGWRITE_BUFFER_FLUSH(ah);
771 DISABLE_REGWRITE_BUFFER(ah);
772} 634}
773 635
774static void ath9k_hw_ani_monitor_old(struct ath_hw *ah, 636static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
775 struct ath9k_channel *chan)
776{ 637{
777 struct ar5416AniState *aniState;
778 struct ath_common *common = ath9k_hw_common(ah); 638 struct ath_common *common = ath9k_hw_common(ah);
779 int32_t listenTime; 639 struct ar5416AniState *aniState = &ah->curchan->ani;
780 u32 phyCnt1, phyCnt2; 640 u32 ofdm_base = 0;
641 u32 cck_base = 0;
781 u32 ofdmPhyErrCnt, cckPhyErrCnt; 642 u32 ofdmPhyErrCnt, cckPhyErrCnt;
643 u32 phyCnt1, phyCnt2;
644 int32_t listenTime;
782 645
783 if (!DO_ANI(ah)) 646 ath_hw_cycle_counters_update(common);
784 return; 647 listenTime = ath_hw_get_listen_time(common);
785
786 aniState = ah->curani;
787 648
788 listenTime = ath9k_hw_ani_get_listen_time(ah); 649 if (listenTime <= 0) {
789 if (listenTime < 0) {
790 ah->stats.ast_ani_lneg++; 650 ah->stats.ast_ani_lneg++;
791 ath9k_ani_restart_old(ah); 651 ath9k_ani_restart(ah);
792 return; 652 return false;
653 }
654
655 if (!use_new_ani(ah)) {
656 ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
657 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
793 } 658 }
794 659
795 aniState->listenTime += listenTime; 660 aniState->listenTime += listenTime;
@@ -799,145 +664,55 @@ static void ath9k_hw_ani_monitor_old(struct ath_hw *ah,
799 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); 664 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
800 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 665 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
801 666
802 if (phyCnt1 < aniState->ofdmPhyErrBase || 667 if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
803 phyCnt2 < aniState->cckPhyErrBase) { 668 if (phyCnt1 < ofdm_base) {
804 if (phyCnt1 < aniState->ofdmPhyErrBase) {
805 ath_print(common, ATH_DBG_ANI, 669 ath_print(common, ATH_DBG_ANI,
806 "phyCnt1 0x%x, resetting " 670 "phyCnt1 0x%x, resetting "
807 "counter value to 0x%x\n", 671 "counter value to 0x%x\n",
808 phyCnt1, 672 phyCnt1, ofdm_base);
809 aniState->ofdmPhyErrBase); 673 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
810 REG_WRITE(ah, AR_PHY_ERR_1,
811 aniState->ofdmPhyErrBase);
812 REG_WRITE(ah, AR_PHY_ERR_MASK_1, 674 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
813 AR_PHY_ERR_OFDM_TIMING); 675 AR_PHY_ERR_OFDM_TIMING);
814 } 676 }
815 if (phyCnt2 < aniState->cckPhyErrBase) { 677 if (phyCnt2 < cck_base) {
816 ath_print(common, ATH_DBG_ANI, 678 ath_print(common, ATH_DBG_ANI,
817 "phyCnt2 0x%x, resetting " 679 "phyCnt2 0x%x, resetting "
818 "counter value to 0x%x\n", 680 "counter value to 0x%x\n",
819 phyCnt2, 681 phyCnt2, cck_base);
820 aniState->cckPhyErrBase); 682 REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
821 REG_WRITE(ah, AR_PHY_ERR_2,
822 aniState->cckPhyErrBase);
823 REG_WRITE(ah, AR_PHY_ERR_MASK_2, 683 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
824 AR_PHY_ERR_CCK_TIMING); 684 AR_PHY_ERR_CCK_TIMING);
825 } 685 }
826 return; 686 return false;
827 } 687 }
828 688
829 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; 689 ofdmPhyErrCnt = phyCnt1 - ofdm_base;
830 ah->stats.ast_ani_ofdmerrs += 690 ah->stats.ast_ani_ofdmerrs +=
831 ofdmPhyErrCnt - aniState->ofdmPhyErrCount; 691 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
832 aniState->ofdmPhyErrCount = ofdmPhyErrCnt; 692 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
833 693
834 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; 694 cckPhyErrCnt = phyCnt2 - cck_base;
835 ah->stats.ast_ani_cckerrs += 695 ah->stats.ast_ani_cckerrs +=
836 cckPhyErrCnt - aniState->cckPhyErrCount; 696 cckPhyErrCnt - aniState->cckPhyErrCount;
837 aniState->cckPhyErrCount = cckPhyErrCnt; 697 aniState->cckPhyErrCount = cckPhyErrCnt;
838 698 return true;
839 if (aniState->listenTime > 5 * ah->aniperiod) {
840 if (aniState->ofdmPhyErrCount <= aniState->listenTime *
841 aniState->ofdmTrigLow / 1000 &&
842 aniState->cckPhyErrCount <= aniState->listenTime *
843 aniState->cckTrigLow / 1000)
844 ath9k_hw_ani_lower_immunity(ah);
845 ath9k_ani_restart_old(ah);
846 } else if (aniState->listenTime > ah->aniperiod) {
847 if (aniState->ofdmPhyErrCount > aniState->listenTime *
848 aniState->ofdmTrigHigh / 1000) {
849 ath9k_hw_ani_ofdm_err_trigger_old(ah);
850 ath9k_ani_restart_old(ah);
851 } else if (aniState->cckPhyErrCount >
852 aniState->listenTime * aniState->cckTrigHigh /
853 1000) {
854 ath9k_hw_ani_cck_err_trigger_old(ah);
855 ath9k_ani_restart_old(ah);
856 }
857 }
858} 699}
859 700
860static void ath9k_hw_ani_monitor_new(struct ath_hw *ah, 701void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
861 struct ath9k_channel *chan)
862{ 702{
863 struct ar5416AniState *aniState; 703 struct ar5416AniState *aniState;
864 struct ath_common *common = ath9k_hw_common(ah); 704 struct ath_common *common = ath9k_hw_common(ah);
865 int32_t listenTime;
866 u32 phyCnt1, phyCnt2;
867 u32 ofdmPhyErrCnt, cckPhyErrCnt;
868 u32 ofdmPhyErrRate, cckPhyErrRate; 705 u32 ofdmPhyErrRate, cckPhyErrRate;
869 706
870 if (!DO_ANI(ah)) 707 if (!DO_ANI(ah))
871 return; 708 return;
872 709
873 aniState = ah->curani; 710 aniState = &ah->curchan->ani;
874 if (WARN_ON(!aniState)) 711 if (WARN_ON(!aniState))
875 return; 712 return;
876 713
877 listenTime = ath9k_hw_ani_get_listen_time(ah); 714 if (!ath9k_hw_ani_read_counters(ah))
878 if (listenTime <= 0) {
879 ah->stats.ast_ani_lneg++;
880 /* restart ANI period if listenTime is invalid */
881 ath_print(common, ATH_DBG_ANI,
882 "listenTime=%d - on new ani monitor\n",
883 listenTime);
884 ath9k_ani_restart_new(ah);
885 return;
886 }
887
888 aniState->listenTime += listenTime;
889
890 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
891
892 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
893 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
894
895 if (phyCnt1 < aniState->ofdmPhyErrBase ||
896 phyCnt2 < aniState->cckPhyErrBase) {
897 if (phyCnt1 < aniState->ofdmPhyErrBase) {
898 ath_print(common, ATH_DBG_ANI,
899 "phyCnt1 0x%x, resetting "
900 "counter value to 0x%x\n",
901 phyCnt1,
902 aniState->ofdmPhyErrBase);
903 REG_WRITE(ah, AR_PHY_ERR_1,
904 aniState->ofdmPhyErrBase);
905 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
906 AR_PHY_ERR_OFDM_TIMING);
907 }
908 if (phyCnt2 < aniState->cckPhyErrBase) {
909 ath_print(common, ATH_DBG_ANI,
910 "phyCnt2 0x%x, resetting "
911 "counter value to 0x%x\n",
912 phyCnt2,
913 aniState->cckPhyErrBase);
914 REG_WRITE(ah, AR_PHY_ERR_2,
915 aniState->cckPhyErrBase);
916 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
917 AR_PHY_ERR_CCK_TIMING);
918 }
919 return; 715 return;
920 }
921
922 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
923 ah->stats.ast_ani_ofdmerrs +=
924 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
925 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
926
927 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
928 ah->stats.ast_ani_cckerrs +=
929 cckPhyErrCnt - aniState->cckPhyErrCount;
930 aniState->cckPhyErrCount = cckPhyErrCnt;
931
932 ath_print(common, ATH_DBG_ANI,
933 "Errors: OFDM=0x%08x-0x%08x=%d "
934 "CCK=0x%08x-0x%08x=%d\n",
935 phyCnt1,
936 aniState->ofdmPhyErrBase,
937 ofdmPhyErrCnt,
938 phyCnt2,
939 aniState->cckPhyErrBase,
940 cckPhyErrCnt);
941 716
942 ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 / 717 ofdmPhyErrRate = aniState->ofdmPhyErrCount * 1000 /
943 aniState->listenTime; 718 aniState->listenTime;
@@ -947,61 +722,34 @@ static void ath9k_hw_ani_monitor_new(struct ath_hw *ah,
947 ath_print(common, ATH_DBG_ANI, 722 ath_print(common, ATH_DBG_ANI,
948 "listenTime=%d OFDM:%d errs=%d/s CCK:%d " 723 "listenTime=%d OFDM:%d errs=%d/s CCK:%d "
949 "errs=%d/s ofdm_turn=%d\n", 724 "errs=%d/s ofdm_turn=%d\n",
950 listenTime, aniState->ofdmNoiseImmunityLevel, 725 aniState->listenTime,
726 aniState->ofdmNoiseImmunityLevel,
951 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, 727 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
952 cckPhyErrRate, aniState->ofdmsTurn); 728 cckPhyErrRate, aniState->ofdmsTurn);
953 729
954 if (aniState->listenTime > 5 * ah->aniperiod) { 730 if (aniState->listenTime > 5 * ah->aniperiod) {
955 if (ofdmPhyErrRate <= aniState->ofdmTrigLow && 731 if (ofdmPhyErrRate <= ah->config.ofdm_trig_low &&
956 cckPhyErrRate <= aniState->cckTrigLow) { 732 cckPhyErrRate <= ah->config.cck_trig_low) {
957 ath_print(common, ATH_DBG_ANI,
958 "1. listenTime=%d OFDM:%d errs=%d/s(<%d) "
959 "CCK:%d errs=%d/s(<%d) -> "
960 "ath9k_hw_ani_lower_immunity()\n",
961 aniState->listenTime,
962 aniState->ofdmNoiseImmunityLevel,
963 ofdmPhyErrRate,
964 aniState->ofdmTrigLow,
965 aniState->cckNoiseImmunityLevel,
966 cckPhyErrRate,
967 aniState->cckTrigLow);
968 ath9k_hw_ani_lower_immunity(ah); 733 ath9k_hw_ani_lower_immunity(ah);
969 aniState->ofdmsTurn = !aniState->ofdmsTurn; 734 aniState->ofdmsTurn = !aniState->ofdmsTurn;
970 } 735 }
971 ath_print(common, ATH_DBG_ANI, 736 ath9k_ani_restart(ah);
972 "1 listenTime=%d ofdm=%d/s cck=%d/s - "
973 "calling ath9k_ani_restart_new()\n",
974 aniState->listenTime, ofdmPhyErrRate, cckPhyErrRate);
975 ath9k_ani_restart_new(ah);
976 } else if (aniState->listenTime > ah->aniperiod) { 737 } else if (aniState->listenTime > ah->aniperiod) {
977 /* check to see if need to raise immunity */ 738 /* check to see if need to raise immunity */
978 if (ofdmPhyErrRate > aniState->ofdmTrigHigh && 739 if (ofdmPhyErrRate > ah->config.ofdm_trig_high &&
979 (cckPhyErrRate <= aniState->cckTrigHigh || 740 (cckPhyErrRate <= ah->config.cck_trig_high ||
980 aniState->ofdmsTurn)) { 741 aniState->ofdmsTurn)) {
981 ath_print(common, ATH_DBG_ANI, 742 ath9k_hw_ani_ofdm_err_trigger(ah);
982 "2 listenTime=%d OFDM:%d errs=%d/s(>%d) -> " 743 ath9k_ani_restart(ah);
983 "ath9k_hw_ani_ofdm_err_trigger_new()\n",
984 aniState->listenTime,
985 aniState->ofdmNoiseImmunityLevel,
986 ofdmPhyErrRate,
987 aniState->ofdmTrigHigh);
988 ath9k_hw_ani_ofdm_err_trigger_new(ah);
989 ath9k_ani_restart_new(ah);
990 aniState->ofdmsTurn = false; 744 aniState->ofdmsTurn = false;
991 } else if (cckPhyErrRate > aniState->cckTrigHigh) { 745 } else if (cckPhyErrRate > ah->config.cck_trig_high) {
992 ath_print(common, ATH_DBG_ANI, 746 ath9k_hw_ani_cck_err_trigger(ah);
993 "3 listenTime=%d CCK:%d errs=%d/s(>%d) -> " 747 ath9k_ani_restart(ah);
994 "ath9k_hw_ani_cck_err_trigger_new()\n",
995 aniState->listenTime,
996 aniState->cckNoiseImmunityLevel,
997 cckPhyErrRate,
998 aniState->cckTrigHigh);
999 ath9k_hw_ani_cck_err_trigger_new(ah);
1000 ath9k_ani_restart_new(ah);
1001 aniState->ofdmsTurn = true; 748 aniState->ofdmsTurn = true;
1002 } 749 }
1003 } 750 }
1004} 751}
752EXPORT_SYMBOL(ath9k_hw_ani_monitor);
1005 753
1006void ath9k_enable_mib_counters(struct ath_hw *ah) 754void ath9k_enable_mib_counters(struct ath_hw *ah)
1007{ 755{
@@ -1022,7 +770,6 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
1022 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 770 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
1023 771
1024 REGWRITE_BUFFER_FLUSH(ah); 772 REGWRITE_BUFFER_FLUSH(ah);
1025 DISABLE_REGWRITE_BUFFER(ah);
1026} 773}
1027 774
1028/* Freeze the MIB counters, get the stats and then clear them */ 775/* Freeze the MIB counters, get the stats and then clear them */
@@ -1040,53 +787,12 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
1040} 787}
1041EXPORT_SYMBOL(ath9k_hw_disable_mib_counters); 788EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
1042 789
1043u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
1044 u32 *rxc_pcnt,
1045 u32 *rxf_pcnt,
1046 u32 *txf_pcnt)
1047{
1048 struct ath_common *common = ath9k_hw_common(ah);
1049 static u32 cycles, rx_clear, rx_frame, tx_frame;
1050 u32 good = 1;
1051
1052 u32 rc = REG_READ(ah, AR_RCCNT);
1053 u32 rf = REG_READ(ah, AR_RFCNT);
1054 u32 tf = REG_READ(ah, AR_TFCNT);
1055 u32 cc = REG_READ(ah, AR_CCCNT);
1056
1057 if (cycles == 0 || cycles > cc) {
1058 ath_print(common, ATH_DBG_ANI,
1059 "cycle counter wrap. ExtBusy = 0\n");
1060 good = 0;
1061 } else {
1062 u32 cc_d = cc - cycles;
1063 u32 rc_d = rc - rx_clear;
1064 u32 rf_d = rf - rx_frame;
1065 u32 tf_d = tf - tx_frame;
1066
1067 if (cc_d != 0) {
1068 *rxc_pcnt = rc_d * 100 / cc_d;
1069 *rxf_pcnt = rf_d * 100 / cc_d;
1070 *txf_pcnt = tf_d * 100 / cc_d;
1071 } else {
1072 good = 0;
1073 }
1074 }
1075
1076 cycles = cc;
1077 rx_frame = rf;
1078 rx_clear = rc;
1079 tx_frame = tf;
1080
1081 return good;
1082}
1083
1084/* 790/*
1085 * Process a MIB interrupt. We may potentially be invoked because 791 * Process a MIB interrupt. We may potentially be invoked because
1086 * any of the MIB counters overflow/trigger so don't assume we're 792 * any of the MIB counters overflow/trigger so don't assume we're
1087 * here because a PHY error counter triggered. 793 * here because a PHY error counter triggered.
1088 */ 794 */
1089static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah) 795void ath9k_hw_proc_mib_event(struct ath_hw *ah)
1090{ 796{
1091 u32 phyCnt1, phyCnt2; 797 u32 phyCnt1, phyCnt2;
1092 798
@@ -1114,72 +820,15 @@ static void ath9k_hw_proc_mib_event_old(struct ath_hw *ah)
1114 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 820 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
1115 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) || 821 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
1116 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) { 822 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
1117 struct ar5416AniState *aniState = ah->curani;
1118 u32 ofdmPhyErrCnt, cckPhyErrCnt;
1119
1120 /* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */
1121 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
1122 ah->stats.ast_ani_ofdmerrs +=
1123 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
1124 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
1125 823
1126 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; 824 if (!use_new_ani(ah))
1127 ah->stats.ast_ani_cckerrs += 825 ath9k_hw_ani_read_counters(ah);
1128 cckPhyErrCnt - aniState->cckPhyErrCount;
1129 aniState->cckPhyErrCount = cckPhyErrCnt;
1130 826
1131 /*
1132 * NB: figure out which counter triggered. If both
1133 * trigger we'll only deal with one as the processing
1134 * clobbers the error counter so the trigger threshold
1135 * check will never be true.
1136 */
1137 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
1138 ath9k_hw_ani_ofdm_err_trigger_new(ah);
1139 if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
1140 ath9k_hw_ani_cck_err_trigger_old(ah);
1141 /* NB: always restart to insure the h/w counters are reset */ 827 /* NB: always restart to insure the h/w counters are reset */
1142 ath9k_ani_restart_old(ah); 828 ath9k_ani_restart(ah);
1143 }
1144}
1145
1146/*
1147 * Process a MIB interrupt. We may potentially be invoked because
1148 * any of the MIB counters overflow/trigger so don't assume we're
1149 * here because a PHY error counter triggered.
1150 */
1151static void ath9k_hw_proc_mib_event_new(struct ath_hw *ah)
1152{
1153 u32 phyCnt1, phyCnt2;
1154
1155 /* Reset these counters regardless */
1156 REG_WRITE(ah, AR_FILT_OFDM, 0);
1157 REG_WRITE(ah, AR_FILT_CCK, 0);
1158 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
1159 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
1160
1161 /* Clear the mib counters and save them in the stats */
1162 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
1163
1164 if (!DO_ANI(ah)) {
1165 /*
1166 * We must always clear the interrupt cause by
1167 * resetting the phy error regs.
1168 */
1169 REG_WRITE(ah, AR_PHY_ERR_1, 0);
1170 REG_WRITE(ah, AR_PHY_ERR_2, 0);
1171 return;
1172 } 829 }
1173
1174 /* NB: these are not reset-on-read */
1175 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
1176 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
1177
1178 /* NB: always restart to insure the h/w counters are reset */
1179 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
1180 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK))
1181 ath9k_ani_restart_new(ah);
1182} 830}
831EXPORT_SYMBOL(ath9k_hw_proc_mib_event);
1183 832
1184void ath9k_hw_ani_setup(struct ath_hw *ah) 833void ath9k_hw_ani_setup(struct ath_hw *ah)
1185{ 834{
@@ -1205,61 +854,58 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
1205 854
1206 ath_print(common, ATH_DBG_ANI, "Initialize ANI\n"); 855 ath_print(common, ATH_DBG_ANI, "Initialize ANI\n");
1207 856
1208 memset(ah->ani, 0, sizeof(ah->ani)); 857 if (use_new_ani(ah)) {
1209 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { 858 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
1210 if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) { 859 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
1211 ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
1212 ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
1213 860
1214 ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_NEW; 861 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
1215 ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_NEW; 862 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_NEW;
863 } else {
864 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
865 ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
1216 866
1217 ah->ani[i].spurImmunityLevel = 867 ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
1218 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW; 868 ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
869 }
1219 870
1220 ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 871 for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
872 struct ath9k_channel *chan = &ah->channels[i];
873 struct ar5416AniState *ani = &chan->ani;
1221 874
1222 ah->ani[i].ofdmPhyErrBase = 0; 875 if (use_new_ani(ah)) {
1223 ah->ani[i].cckPhyErrBase = 0; 876 ani->spurImmunityLevel =
877 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
878
879 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
1224 880
1225 if (AR_SREV_9300_20_OR_LATER(ah)) 881 if (AR_SREV_9300_20_OR_LATER(ah))
1226 ah->ani[i].mrcCCKOff = 882 ani->mrcCCKOff =
1227 !ATH9K_ANI_ENABLE_MRC_CCK; 883 !ATH9K_ANI_ENABLE_MRC_CCK;
1228 else 884 else
1229 ah->ani[i].mrcCCKOff = true; 885 ani->mrcCCKOff = true;
1230 886
1231 ah->ani[i].ofdmsTurn = true; 887 ani->ofdmsTurn = true;
1232 } else { 888 } else {
1233 ah->ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH_OLD; 889 ani->spurImmunityLevel =
1234 ah->ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
1235
1236 ah->ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
1237 ah->ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW_OLD;
1238
1239 ah->ani[i].spurImmunityLevel =
1240 ATH9K_ANI_SPUR_IMMUNE_LVL_OLD; 890 ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
1241 ah->ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD; 891 ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
1242 892
1243 ah->ani[i].ofdmPhyErrBase = 893 ani->cckWeakSigThreshold =
1244 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
1245 ah->ani[i].cckPhyErrBase =
1246 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH_OLD;
1247 ah->ani[i].cckWeakSigThreshold =
1248 ATH9K_ANI_CCK_WEAK_SIG_THR; 894 ATH9K_ANI_CCK_WEAK_SIG_THR;
1249 } 895 }
1250 896
1251 ah->ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; 897 ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
1252 ah->ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; 898 ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
1253 ah->ani[i].ofdmWeakSigDetectOff = 899 ani->ofdmWeakSigDetectOff =
1254 !ATH9K_ANI_USE_OFDM_WEAK_SIG; 900 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
1255 ah->ani[i].cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; 901 ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
1256 } 902 }
1257 903
1258 /* 904 /*
1259 * since we expect some ongoing maintenance on the tables, let's sanity 905 * since we expect some ongoing maintenance on the tables, let's sanity
1260 * check here default level should not modify INI setting. 906 * check here default level should not modify INI setting.
1261 */ 907 */
1262 if (AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani) { 908 if (use_new_ani(ah)) {
1263 const struct ani_ofdm_level_entry *entry_ofdm; 909 const struct ani_ofdm_level_entry *entry_ofdm;
1264 const struct ani_cck_level_entry *entry_cck; 910 const struct ani_cck_level_entry *entry_cck;
1265 911
@@ -1273,50 +919,9 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
1273 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD; 919 ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
1274 } 920 }
1275 921
1276 ath_print(common, ATH_DBG_ANI,
1277 "Setting OfdmErrBase = 0x%08x\n",
1278 ah->ani[0].ofdmPhyErrBase);
1279 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
1280 ah->ani[0].cckPhyErrBase);
1281
1282 ENABLE_REGWRITE_BUFFER(ah);
1283
1284 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
1285 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
1286
1287 REGWRITE_BUFFER_FLUSH(ah);
1288 DISABLE_REGWRITE_BUFFER(ah);
1289
1290 ath9k_enable_mib_counters(ah);
1291
1292 if (ah->config.enable_ani) 922 if (ah->config.enable_ani)
1293 ah->proc_phyerr |= HAL_PROCESS_ANI; 923 ah->proc_phyerr |= HAL_PROCESS_ANI;
1294}
1295
1296void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah)
1297{
1298 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1299 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1300
1301 priv_ops->ani_reset = ath9k_ani_reset_old;
1302 priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_old;
1303
1304 ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_old;
1305 ops->ani_monitor = ath9k_hw_ani_monitor_old;
1306 924
1307 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v1\n"); 925 ath9k_ani_restart(ah);
1308} 926 ath9k_enable_mib_counters(ah);
1309
1310void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah)
1311{
1312 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1313 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
1314
1315 priv_ops->ani_reset = ath9k_ani_reset_new;
1316 priv_ops->ani_lower_immunity = ath9k_hw_ani_lower_immunity_new;
1317
1318 ops->ani_proc_mib_event = ath9k_hw_proc_mib_event_new;
1319 ops->ani_monitor = ath9k_hw_ani_monitor_new;
1320
1321 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, "Using ANI v2\n");
1322} 927}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index f4d0a4d48b37..0cd6783de883 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -19,7 +19,7 @@
19 19
20#define HAL_PROCESS_ANI 0x00000001 20#define HAL_PROCESS_ANI 0x00000001
21 21
22#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI)) 22#define DO_ANI(ah) (((ah)->proc_phyerr & HAL_PROCESS_ANI) && ah->curchan)
23 23
24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi) 24#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
25 25
@@ -123,20 +123,11 @@ struct ar5416AniState {
123 u8 ofdmWeakSigDetectOff; 123 u8 ofdmWeakSigDetectOff;
124 u8 cckWeakSigThreshold; 124 u8 cckWeakSigThreshold;
125 u32 listenTime; 125 u32 listenTime;
126 u32 ofdmTrigHigh;
127 u32 ofdmTrigLow;
128 int32_t cckTrigHigh;
129 int32_t cckTrigLow;
130 int32_t rssiThrLow; 126 int32_t rssiThrLow;
131 int32_t rssiThrHigh; 127 int32_t rssiThrHigh;
132 u32 noiseFloor; 128 u32 noiseFloor;
133 u32 txFrameCount;
134 u32 rxFrameCount;
135 u32 cycleCount;
136 u32 ofdmPhyErrCount; 129 u32 ofdmPhyErrCount;
137 u32 cckPhyErrCount; 130 u32 cckPhyErrCount;
138 u32 ofdmPhyErrBase;
139 u32 cckPhyErrBase;
140 int16_t pktRssi[2]; 131 int16_t pktRssi[2];
141 int16_t ofdmErrRssi[2]; 132 int16_t ofdmErrRssi[2];
142 int16_t cckErrRssi[2]; 133 int16_t cckErrRssi[2];
@@ -166,8 +157,6 @@ struct ar5416Stats {
166 157
167void ath9k_enable_mib_counters(struct ath_hw *ah); 158void ath9k_enable_mib_counters(struct ath_hw *ah);
168void ath9k_hw_disable_mib_counters(struct ath_hw *ah); 159void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
169u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
170 u32 *rxf_pcnt, u32 *txf_pcnt);
171void ath9k_hw_ani_setup(struct ath_hw *ah); 160void ath9k_hw_ani_setup(struct ath_hw *ah);
172void ath9k_hw_ani_init(struct ath_hw *ah); 161void ath9k_hw_ani_init(struct ath_hw *ah);
173int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, 162int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 3d2c8679bc85..ea9f4497f58c 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -118,7 +118,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
118 if (!AR_SREV_5416(ah) || synth_freq >= 3000) 118 if (!AR_SREV_5416(ah) || synth_freq >= 3000)
119 return; 119 return;
120 120
121 BUG_ON(AR_SREV_9280_10_OR_LATER(ah)); 121 BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
122 122
123 if (synth_freq < 2412) 123 if (synth_freq < 2412)
124 new_bias = 0; 124 new_bias = 0;
@@ -454,7 +454,7 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
454 454
455 struct ath_common *common = ath9k_hw_common(ah); 455 struct ath_common *common = ath9k_hw_common(ah);
456 456
457 BUG_ON(AR_SREV_9280_10_OR_LATER(ah)); 457 BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
458 458
459 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows); 459 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
460 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows); 460 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
@@ -484,7 +484,7 @@ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
484 bank = NULL; \ 484 bank = NULL; \
485 } while (0); 485 } while (0);
486 486
487 BUG_ON(AR_SREV_9280_10_OR_LATER(ah)); 487 BUG_ON(AR_SREV_9280_20_OR_LATER(ah));
488 488
489 ATH_FREE_BANK(ah->analogBank0Data); 489 ATH_FREE_BANK(ah->analogBank0Data);
490 ATH_FREE_BANK(ah->analogBank1Data); 490 ATH_FREE_BANK(ah->analogBank1Data);
@@ -525,7 +525,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
525 * for single chip devices, that is AR9280 or anything 525 * for single chip devices, that is AR9280 or anything
526 * after that. 526 * after that.
527 */ 527 */
528 if (AR_SREV_9280_10_OR_LATER(ah)) 528 if (AR_SREV_9280_20_OR_LATER(ah))
529 return true; 529 return true;
530 530
531 /* Setup rf parameters */ 531 /* Setup rf parameters */
@@ -613,14 +613,11 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
613 rx_chainmask = ah->rxchainmask; 613 rx_chainmask = ah->rxchainmask;
614 tx_chainmask = ah->txchainmask; 614 tx_chainmask = ah->txchainmask;
615 615
616 ENABLE_REGWRITE_BUFFER(ah);
617 616
618 switch (rx_chainmask) { 617 switch (rx_chainmask) {
619 case 0x5: 618 case 0x5:
620 DISABLE_REGWRITE_BUFFER(ah);
621 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 619 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
622 AR_PHY_SWAP_ALT_CHAIN); 620 AR_PHY_SWAP_ALT_CHAIN);
623 ENABLE_REGWRITE_BUFFER(ah);
624 case 0x3: 621 case 0x3:
625 if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) { 622 if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
626 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); 623 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
@@ -630,17 +627,18 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
630 case 0x1: 627 case 0x1:
631 case 0x2: 628 case 0x2:
632 case 0x7: 629 case 0x7:
630 ENABLE_REGWRITE_BUFFER(ah);
633 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); 631 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
634 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); 632 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
635 break; 633 break;
636 default: 634 default:
635 ENABLE_REGWRITE_BUFFER(ah);
637 break; 636 break;
638 } 637 }
639 638
640 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask); 639 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
641 640
642 REGWRITE_BUFFER_FLUSH(ah); 641 REGWRITE_BUFFER_FLUSH(ah);
643 DISABLE_REGWRITE_BUFFER(ah);
644 642
645 if (tx_chainmask == 0x5) { 643 if (tx_chainmask == 0x5) {
646 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 644 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
@@ -663,20 +661,20 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
663 */ 661 */
664 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 662 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
665 663
666 if (AR_SREV_9280_10_OR_LATER(ah)) { 664 if (AR_SREV_9280_20_OR_LATER(ah)) {
667 val = REG_READ(ah, AR_PCU_MISC_MODE2); 665 val = REG_READ(ah, AR_PCU_MISC_MODE2);
668 666
669 if (!AR_SREV_9271(ah)) 667 if (!AR_SREV_9271(ah))
670 val &= ~AR_PCU_MISC_MODE2_HWWAR1; 668 val &= ~AR_PCU_MISC_MODE2_HWWAR1;
671 669
672 if (AR_SREV_9287_10_OR_LATER(ah)) 670 if (AR_SREV_9287_11_OR_LATER(ah))
673 val = val & (~AR_PCU_MISC_MODE2_HWWAR2); 671 val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
674 672
675 REG_WRITE(ah, AR_PCU_MISC_MODE2, val); 673 REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
676 } 674 }
677 675
678 if (!AR_SREV_5416_20_OR_LATER(ah) || 676 if (!AR_SREV_5416_20_OR_LATER(ah) ||
679 AR_SREV_9280_10_OR_LATER(ah)) 677 AR_SREV_9280_20_OR_LATER(ah))
680 return; 678 return;
681 /* 679 /*
682 * Disable BB clock gating 680 * Disable BB clock gating
@@ -701,7 +699,7 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
701 u32 phymode; 699 u32 phymode;
702 u32 enableDacFifo = 0; 700 u32 enableDacFifo = 0;
703 701
704 if (AR_SREV_9285_10_OR_LATER(ah)) 702 if (AR_SREV_9285_12_OR_LATER(ah))
705 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) & 703 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
706 AR_PHY_FC_ENABLE_DAC_FIFO); 704 AR_PHY_FC_ENABLE_DAC_FIFO);
707 705
@@ -726,7 +724,6 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
726 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); 724 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
727 725
728 REGWRITE_BUFFER_FLUSH(ah); 726 REGWRITE_BUFFER_FLUSH(ah);
729 DISABLE_REGWRITE_BUFFER(ah);
730} 727}
731 728
732 729
@@ -818,13 +815,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
818 } 815 }
819 816
820 REGWRITE_BUFFER_FLUSH(ah); 817 REGWRITE_BUFFER_FLUSH(ah);
821 DISABLE_REGWRITE_BUFFER(ah);
822 818
823 if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah)) 819 if (AR_SREV_9280(ah) || AR_SREV_9287_11_OR_LATER(ah))
824 REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites); 820 REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
825 821
826 if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) || 822 if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
827 AR_SREV_9287_10_OR_LATER(ah)) 823 AR_SREV_9287_11_OR_LATER(ah))
828 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); 824 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
829 825
830 if (AR_SREV_9271_10(ah)) 826 if (AR_SREV_9271_10(ah))
@@ -849,7 +845,6 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
849 } 845 }
850 846
851 REGWRITE_BUFFER_FLUSH(ah); 847 REGWRITE_BUFFER_FLUSH(ah);
852 DISABLE_REGWRITE_BUFFER(ah);
853 848
854 if (AR_SREV_9271(ah)) { 849 if (AR_SREV_9271(ah)) {
855 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1) 850 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
@@ -900,7 +895,7 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
900 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) 895 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
901 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; 896 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
902 897
903 if (!AR_SREV_9280_10_OR_LATER(ah)) 898 if (!AR_SREV_9280_20_OR_LATER(ah))
904 rfMode |= (IS_CHAN_5GHZ(chan)) ? 899 rfMode |= (IS_CHAN_5GHZ(chan)) ?
905 AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ; 900 AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
906 901
@@ -1053,7 +1048,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1053 enum ath9k_ani_cmd cmd, 1048 enum ath9k_ani_cmd cmd,
1054 int param) 1049 int param)
1055{ 1050{
1056 struct ar5416AniState *aniState = ah->curani; 1051 struct ar5416AniState *aniState = &ah->curchan->ani;
1057 struct ath_common *common = ath9k_hw_common(ah); 1052 struct ath_common *common = ath9k_hw_common(ah);
1058 1053
1059 switch (cmd & ah->ani_function) { 1054 switch (cmd & ah->ani_function) {
@@ -1225,8 +1220,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1225 aniState->firstepLevel, 1220 aniState->firstepLevel,
1226 aniState->listenTime); 1221 aniState->listenTime);
1227 ath_print(common, ATH_DBG_ANI, 1222 ath_print(common, ATH_DBG_ANI,
1228 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", 1223 "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
1229 aniState->cycleCount,
1230 aniState->ofdmPhyErrCount, 1224 aniState->ofdmPhyErrCount,
1231 aniState->cckPhyErrCount); 1225 aniState->cckPhyErrCount);
1232 1226
@@ -1237,9 +1231,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1237 enum ath9k_ani_cmd cmd, 1231 enum ath9k_ani_cmd cmd,
1238 int param) 1232 int param)
1239{ 1233{
1240 struct ar5416AniState *aniState = ah->curani;
1241 struct ath_common *common = ath9k_hw_common(ah); 1234 struct ath_common *common = ath9k_hw_common(ah);
1242 struct ath9k_channel *chan = ah->curchan; 1235 struct ath9k_channel *chan = ah->curchan;
1236 struct ar5416AniState *aniState = &chan->ani;
1243 s32 value, value2; 1237 s32 value, value2;
1244 1238
1245 switch (cmd & ah->ani_function) { 1239 switch (cmd & ah->ani_function) {
@@ -1478,15 +1472,13 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1478 1472
1479 ath_print(common, ATH_DBG_ANI, 1473 ath_print(common, ATH_DBG_ANI,
1480 "ANI parameters: SI=%d, ofdmWS=%s FS=%d " 1474 "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
1481 "MRCcck=%s listenTime=%d CC=%d listen=%d " 1475 "MRCcck=%s listenTime=%d "
1482 "ofdmErrs=%d cckErrs=%d\n", 1476 "ofdmErrs=%d cckErrs=%d\n",
1483 aniState->spurImmunityLevel, 1477 aniState->spurImmunityLevel,
1484 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1478 !aniState->ofdmWeakSigDetectOff ? "on" : "off",
1485 aniState->firstepLevel, 1479 aniState->firstepLevel,
1486 !aniState->mrcCCKOff ? "on" : "off", 1480 !aniState->mrcCCKOff ? "on" : "off",
1487 aniState->listenTime, 1481 aniState->listenTime,
1488 aniState->cycleCount,
1489 aniState->listenTime,
1490 aniState->ofdmPhyErrCount, 1482 aniState->ofdmPhyErrCount,
1491 aniState->cckPhyErrCount); 1483 aniState->cckPhyErrCount);
1492 return true; 1484 return true;
@@ -1526,16 +1518,12 @@ static void ar5008_hw_do_getnf(struct ath_hw *ah,
1526 */ 1518 */
1527static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah) 1519static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1528{ 1520{
1529 struct ar5416AniState *aniState;
1530 struct ath_common *common = ath9k_hw_common(ah); 1521 struct ath_common *common = ath9k_hw_common(ah);
1531 struct ath9k_channel *chan = ah->curchan; 1522 struct ath9k_channel *chan = ah->curchan;
1523 struct ar5416AniState *aniState = &chan->ani;
1532 struct ath9k_ani_default *iniDef; 1524 struct ath9k_ani_default *iniDef;
1533 int index;
1534 u32 val; 1525 u32 val;
1535 1526
1536 index = ath9k_hw_get_ani_channel_idx(ah, chan);
1537 aniState = &ah->ani[index];
1538 ah->curani = aniState;
1539 iniDef = &aniState->iniDef; 1527 iniDef = &aniState->iniDef;
1540 1528
1541 ath_print(common, ATH_DBG_ANI, 1529 ath_print(common, ATH_DBG_ANI,
@@ -1579,8 +1567,6 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1579 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 1567 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
1580 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1568 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
1581 aniState->mrcCCKOff = true; /* not available on pre AR9003 */ 1569 aniState->mrcCCKOff = true; /* not available on pre AR9003 */
1582
1583 aniState->cycleCount = 0;
1584} 1570}
1585 1571
1586static void ar5008_hw_set_nf_limits(struct ath_hw *ah) 1572static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index fe7418aefc4a..15f62cd0cc38 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -20,6 +20,13 @@
20 20
21#define AR9285_CLCAL_REDO_THRESH 1 21#define AR9285_CLCAL_REDO_THRESH 1
22 22
23enum ar9002_cal_types {
24 ADC_GAIN_CAL = BIT(0),
25 ADC_DC_CAL = BIT(1),
26 IQ_MISMATCH_CAL = BIT(2),
27};
28
29
23static void ar9002_hw_setup_calibration(struct ath_hw *ah, 30static void ar9002_hw_setup_calibration(struct ath_hw *ah,
24 struct ath9k_cal_list *currCal) 31 struct ath9k_cal_list *currCal)
25{ 32{
@@ -45,13 +52,6 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah,
45 ath_print(common, ATH_DBG_CALIBRATE, 52 ath_print(common, ATH_DBG_CALIBRATE,
46 "starting ADC DC Calibration\n"); 53 "starting ADC DC Calibration\n");
47 break; 54 break;
48 case ADC_DC_INIT_CAL:
49 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
50 ath_print(common, ATH_DBG_CALIBRATE,
51 "starting Init ADC DC Calibration\n");
52 break;
53 case TEMP_COMP_CAL:
54 break; /* Not supported */
55 } 55 }
56 56
57 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0), 57 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
@@ -96,25 +96,6 @@ static bool ar9002_hw_per_calibration(struct ath_hw *ah,
96 return iscaldone; 96 return iscaldone;
97} 97}
98 98
99/* Assumes you are talking about the currently configured channel */
100static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
101 enum ath9k_cal_types calType)
102{
103 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
104
105 switch (calType & ah->supp_cals) {
106 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
107 return true;
108 case ADC_GAIN_CAL:
109 case ADC_DC_CAL:
110 if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
111 conf_is_ht20(conf)))
112 return true;
113 break;
114 }
115 return false;
116}
117
118static void ar9002_hw_iqcal_collect(struct ath_hw *ah) 99static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
119{ 100{
120 int i; 101 int i;
@@ -541,7 +522,6 @@ static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
541 REG_WRITE(ah, regList[i][0], regList[i][1]); 522 REG_WRITE(ah, regList[i][0], regList[i][1]);
542 523
543 REGWRITE_BUFFER_FLUSH(ah); 524 REGWRITE_BUFFER_FLUSH(ah);
544 DISABLE_REGWRITE_BUFFER(ah);
545} 525}
546 526
547static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset) 527static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
@@ -567,11 +547,6 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
567 AR5416_EEP_TXGAIN_HIGH_POWER) 547 AR5416_EEP_TXGAIN_HIGH_POWER)
568 return; 548 return;
569 549
570 if (AR_SREV_9285_11(ah)) {
571 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
572 udelay(10);
573 }
574
575 for (i = 0; i < ARRAY_SIZE(regList); i++) 550 for (i = 0; i < ARRAY_SIZE(regList); i++)
576 regList[i][1] = REG_READ(ah, regList[i][0]); 551 regList[i][1] = REG_READ(ah, regList[i][0]);
577 552
@@ -651,10 +626,6 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
651 REG_WRITE(ah, regList[i][0], regList[i][1]); 626 REG_WRITE(ah, regList[i][0], regList[i][1]);
652 627
653 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org); 628 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
654
655 if (AR_SREV_9285_11(ah))
656 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
657
658} 629}
659 630
660static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset) 631static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
@@ -664,7 +635,7 @@ static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
664 ar9271_hw_pa_cal(ah, is_reset); 635 ar9271_hw_pa_cal(ah, is_reset);
665 else 636 else
666 ah->pacal_info.skipcount--; 637 ah->pacal_info.skipcount--;
667 } else if (AR_SREV_9285_11_OR_LATER(ah)) { 638 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
668 if (is_reset || !ah->pacal_info.skipcount) 639 if (is_reset || !ah->pacal_info.skipcount)
669 ar9285_hw_pa_cal(ah, is_reset); 640 ar9285_hw_pa_cal(ah, is_reset);
670 else 641 else
@@ -841,8 +812,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
841 if (!ar9285_hw_clc(ah, chan)) 812 if (!ar9285_hw_clc(ah, chan))
842 return false; 813 return false;
843 } else { 814 } else {
844 if (AR_SREV_9280_10_OR_LATER(ah)) { 815 if (AR_SREV_9280_20_OR_LATER(ah)) {
845 if (!AR_SREV_9287_10_OR_LATER(ah)) 816 if (!AR_SREV_9287_11_OR_LATER(ah))
846 REG_CLR_BIT(ah, AR_PHY_ADC_CTL, 817 REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
847 AR_PHY_ADC_CTL_OFF_PWDADC); 818 AR_PHY_ADC_CTL_OFF_PWDADC);
848 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, 819 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
@@ -864,8 +835,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
864 return false; 835 return false;
865 } 836 }
866 837
867 if (AR_SREV_9280_10_OR_LATER(ah)) { 838 if (AR_SREV_9280_20_OR_LATER(ah)) {
868 if (!AR_SREV_9287_10_OR_LATER(ah)) 839 if (!AR_SREV_9287_11_OR_LATER(ah))
869 REG_SET_BIT(ah, AR_PHY_ADC_CTL, 840 REG_SET_BIT(ah, AR_PHY_ADC_CTL,
870 AR_PHY_ADC_CTL_OFF_PWDADC); 841 AR_PHY_ADC_CTL_OFF_PWDADC);
871 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 842 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
@@ -886,24 +857,28 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
886 857
887 /* Enable IQ, ADC Gain and ADC DC offset CALs */ 858 /* Enable IQ, ADC Gain and ADC DC offset CALs */
888 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) { 859 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
889 if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) { 860 ah->supp_cals = IQ_MISMATCH_CAL;
861
862 if (AR_SREV_9160_10_OR_LATER(ah) &&
863 !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) {
864 ah->supp_cals |= ADC_GAIN_CAL | ADC_DC_CAL;
865
866
890 INIT_CAL(&ah->adcgain_caldata); 867 INIT_CAL(&ah->adcgain_caldata);
891 INSERT_CAL(ah, &ah->adcgain_caldata); 868 INSERT_CAL(ah, &ah->adcgain_caldata);
892 ath_print(common, ATH_DBG_CALIBRATE, 869 ath_print(common, ATH_DBG_CALIBRATE,
893 "enabling ADC Gain Calibration.\n"); 870 "enabling ADC Gain Calibration.\n");
894 } 871
895 if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
896 INIT_CAL(&ah->adcdc_caldata); 872 INIT_CAL(&ah->adcdc_caldata);
897 INSERT_CAL(ah, &ah->adcdc_caldata); 873 INSERT_CAL(ah, &ah->adcdc_caldata);
898 ath_print(common, ATH_DBG_CALIBRATE, 874 ath_print(common, ATH_DBG_CALIBRATE,
899 "enabling ADC DC Calibration.\n"); 875 "enabling ADC DC Calibration.\n");
900 } 876 }
901 if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) { 877
902 INIT_CAL(&ah->iq_caldata); 878 INIT_CAL(&ah->iq_caldata);
903 INSERT_CAL(ah, &ah->iq_caldata); 879 INSERT_CAL(ah, &ah->iq_caldata);
904 ath_print(common, ATH_DBG_CALIBRATE, 880 ath_print(common, ATH_DBG_CALIBRATE,
905 "enabling IQ Calibration.\n"); 881 "enabling IQ Calibration.\n");
906 }
907 882
908 ah->cal_list_curr = ah->cal_list; 883 ah->cal_list_curr = ah->cal_list;
909 884
@@ -959,13 +934,6 @@ static const struct ath9k_percal_data adc_dc_cal_single_sample = {
959 ar9002_hw_adc_dccal_collect, 934 ar9002_hw_adc_dccal_collect,
960 ar9002_hw_adc_dccal_calibrate 935 ar9002_hw_adc_dccal_calibrate
961}; 936};
962static const struct ath9k_percal_data adc_init_dc_cal = {
963 ADC_DC_INIT_CAL,
964 MIN_CAL_SAMPLES,
965 INIT_LOG_COUNT,
966 ar9002_hw_adc_dccal_collect,
967 ar9002_hw_adc_dccal_calibrate
968};
969 937
970static void ar9002_hw_init_cal_settings(struct ath_hw *ah) 938static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
971{ 939{
@@ -976,22 +944,18 @@ static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
976 } 944 }
977 945
978 if (AR_SREV_9160_10_OR_LATER(ah)) { 946 if (AR_SREV_9160_10_OR_LATER(ah)) {
979 if (AR_SREV_9280_10_OR_LATER(ah)) { 947 if (AR_SREV_9280_20_OR_LATER(ah)) {
980 ah->iq_caldata.calData = &iq_cal_single_sample; 948 ah->iq_caldata.calData = &iq_cal_single_sample;
981 ah->adcgain_caldata.calData = 949 ah->adcgain_caldata.calData =
982 &adc_gain_cal_single_sample; 950 &adc_gain_cal_single_sample;
983 ah->adcdc_caldata.calData = 951 ah->adcdc_caldata.calData =
984 &adc_dc_cal_single_sample; 952 &adc_dc_cal_single_sample;
985 ah->adcdc_calinitdata.calData =
986 &adc_init_dc_cal;
987 } else { 953 } else {
988 ah->iq_caldata.calData = &iq_cal_multi_sample; 954 ah->iq_caldata.calData = &iq_cal_multi_sample;
989 ah->adcgain_caldata.calData = 955 ah->adcgain_caldata.calData =
990 &adc_gain_cal_multi_sample; 956 &adc_gain_cal_multi_sample;
991 ah->adcdc_caldata.calData = 957 ah->adcdc_caldata.calData =
992 &adc_dc_cal_multi_sample; 958 &adc_dc_cal_multi_sample;
993 ah->adcdc_calinitdata.calData =
994 &adc_init_dc_cal;
995 } 959 }
996 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; 960 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
997 } 961 }
@@ -1005,7 +969,6 @@ void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
1005 priv_ops->init_cal_settings = ar9002_hw_init_cal_settings; 969 priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
1006 priv_ops->init_cal = ar9002_hw_init_cal; 970 priv_ops->init_cal = ar9002_hw_init_cal;
1007 priv_ops->setup_calibration = ar9002_hw_setup_calibration; 971 priv_ops->setup_calibration = ar9002_hw_setup_calibration;
1008 priv_ops->iscal_supported = ar9002_hw_iscal_supported;
1009 972
1010 ops->calibrate = ar9002_hw_calibrate; 973 ops->calibrate = ar9002_hw_calibrate;
1011} 974}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 303c63da5ea3..a0471f2e1c7a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -371,7 +371,6 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
371 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 371 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
372 372
373 REGWRITE_BUFFER_FLUSH(ah); 373 REGWRITE_BUFFER_FLUSH(ah);
374 DISABLE_REGWRITE_BUFFER(ah);
375 } 374 }
376 375
377 udelay(1000); 376 udelay(1000);
@@ -468,7 +467,6 @@ static int ar9002_hw_get_radiorev(struct ath_hw *ah)
468 REG_WRITE(ah, AR_PHY(0x20), 0x00010000); 467 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
469 468
470 REGWRITE_BUFFER_FLUSH(ah); 469 REGWRITE_BUFFER_FLUSH(ah);
471 DISABLE_REGWRITE_BUFFER(ah);
472 470
473 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff; 471 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
474 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4); 472 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
@@ -569,14 +567,57 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
569 ops->config_pci_powersave = ar9002_hw_configpcipowersave; 567 ops->config_pci_powersave = ar9002_hw_configpcipowersave;
570 568
571 ar5008_hw_attach_phy_ops(ah); 569 ar5008_hw_attach_phy_ops(ah);
572 if (AR_SREV_9280_10_OR_LATER(ah)) 570 if (AR_SREV_9280_20_OR_LATER(ah))
573 ar9002_hw_attach_phy_ops(ah); 571 ar9002_hw_attach_phy_ops(ah);
574 572
575 ar9002_hw_attach_calib_ops(ah); 573 ar9002_hw_attach_calib_ops(ah);
576 ar9002_hw_attach_mac_ops(ah); 574 ar9002_hw_attach_mac_ops(ah);
575}
576
577void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
578{
579 u32 modesIndex;
580 int i;
581
582 switch (chan->chanmode) {
583 case CHANNEL_A:
584 case CHANNEL_A_HT20:
585 modesIndex = 1;
586 break;
587 case CHANNEL_A_HT40PLUS:
588 case CHANNEL_A_HT40MINUS:
589 modesIndex = 2;
590 break;
591 case CHANNEL_G:
592 case CHANNEL_G_HT20:
593 case CHANNEL_B:
594 modesIndex = 4;
595 break;
596 case CHANNEL_G_HT40PLUS:
597 case CHANNEL_G_HT40MINUS:
598 modesIndex = 3;
599 break;
600
601 default:
602 return;
603 }
604
605 ENABLE_REGWRITE_BUFFER(ah);
577 606
578 if (modparam_force_new_ani) 607 for (i = 0; i < ah->iniModes_9271_ANI_reg.ia_rows; i++) {
579 ath9k_hw_attach_ani_ops_new(ah); 608 u32 reg = INI_RA(&ah->iniModes_9271_ANI_reg, i, 0);
580 else 609 u32 val = INI_RA(&ah->iniModes_9271_ANI_reg, i, modesIndex);
581 ath9k_hw_attach_ani_ops_old(ah); 610 u32 val_orig;
611
612 if (reg == AR_PHY_CCK_DETECT) {
613 val_orig = REG_READ(ah, reg);
614 val &= AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
615 val_orig &= ~AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK;
616
617 REG_WRITE(ah, reg, val|val_orig);
618 } else
619 REG_WRITE(ah, reg, val);
620 }
621
622 REGWRITE_BUFFER_FLUSH(ah);
582} 623}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index adbf031fbc5a..c00cdc67b55b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -415,7 +415,6 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
415 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 415 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
416 416
417 REGWRITE_BUFFER_FLUSH(ah); 417 REGWRITE_BUFFER_FLUSH(ah);
418 DISABLE_REGWRITE_BUFFER(ah);
419} 418}
420 419
421static void ar9002_olc_init(struct ath_hw *ah) 420static void ar9002_olc_init(struct ath_hw *ah)
@@ -530,3 +529,38 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
530 529
531 ar9002_hw_set_nf_limits(ah); 530 ar9002_hw_set_nf_limits(ah);
532} 531}
532
533void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
534 struct ath_hw_antcomb_conf *antconf)
535{
536 u32 regval;
537
538 regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
539 antconf->main_lna_conf = (regval & AR_PHY_9285_ANT_DIV_MAIN_LNACONF) >>
540 AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S;
541 antconf->alt_lna_conf = (regval & AR_PHY_9285_ANT_DIV_ALT_LNACONF) >>
542 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
543 antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
544 AR_PHY_9285_FAST_DIV_BIAS_S;
545}
546EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_get);
547
548void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
549 struct ath_hw_antcomb_conf *antconf)
550{
551 u32 regval;
552
553 regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
554 regval &= ~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
555 AR_PHY_9285_ANT_DIV_ALT_LNACONF |
556 AR_PHY_9285_FAST_DIV_BIAS);
557 regval |= ((antconf->main_lna_conf << AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S)
558 & AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
559 regval |= ((antconf->alt_lna_conf << AR_PHY_9285_ANT_DIV_ALT_LNACONF_S)
560 & AR_PHY_9285_ANT_DIV_ALT_LNACONF);
561 regval |= ((antconf->fast_div_bias << AR_PHY_9285_FAST_DIV_BIAS_S)
562 & AR_PHY_9285_FAST_DIV_BIAS);
563
564 REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
565}
566EXPORT_SYMBOL(ath9k_hw_antdiv_comb_conf_set);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index c5151a4dd10b..37663dbbcf57 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -302,6 +302,8 @@
302#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000 302#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
303 303
304#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac 304#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
305#define AR_PHY_9285_FAST_DIV_BIAS 0x00007E00
306#define AR_PHY_9285_FAST_DIV_BIAS_S 9
305#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000 307#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
306#define AR_PHY_9285_ANT_DIV_CTL 0x01000000 308#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
307#define AR_PHY_9285_ANT_DIV_CTL_S 24 309#define AR_PHY_9285_ANT_DIV_CTL_S 24
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
deleted file mode 100644
index d3375fc4ce8b..000000000000
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p0_initvals.h
+++ /dev/null
@@ -1,1784 +0,0 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_9003_2P0_H
18#define INITVALS_9003_2P0_H
19
20/* AR9003 2.0 */
21
22static const u32 ar9300_2p0_radio_postamble[][5] = {
23 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
24 {0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
25 {0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
26 {0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
27 {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
28 {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
29 {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
30 {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
31 {0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
32 {0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
33};
34
35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
37 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
38 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
39 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
40 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
41 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
42 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
43 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
44 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
45 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
46 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
47 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
48 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
49 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
50 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
51 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
52 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
53 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
54 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
55 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
56 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
57 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
58 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
59 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
60 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
61 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
62 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
63 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
64 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
65 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
66 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
67 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
68 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
69 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
70 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
71 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
72 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
73 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
74 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
75 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
76 {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
77 {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
78 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
79 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
80 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
81 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
82 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
83 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
84 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
85 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
86 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
87 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
88 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
89 {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
90 {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
91 {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
92 {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
93 {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
94 {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
95 {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
96 {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
97 {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
98 {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
99 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
100 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
101 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
102 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
103 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
104 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
105 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
106 {0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
107 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
108 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
109 {0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
110 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
111};
112
113static const u32 ar9300Modes_fast_clock_2p0[][3] = {
114 /* Addr 5G_HT20 5G_HT40 */
115 {0x00001030, 0x00000268, 0x000004d0},
116 {0x00001070, 0x0000018c, 0x00000318},
117 {0x000010b0, 0x00000fd0, 0x00001fa0},
118 {0x00008014, 0x044c044c, 0x08980898},
119 {0x0000801c, 0x148ec02b, 0x148ec057},
120 {0x00008318, 0x000044c0, 0x00008980},
121 {0x00009e00, 0x03721821, 0x03721821},
122 {0x0000a230, 0x0000000b, 0x00000016},
123 {0x0000a254, 0x00000898, 0x00001130},
124};
125
126static const u32 ar9300_2p0_radio_core[][2] = {
127 /* Addr allmodes */
128 {0x00016000, 0x36db6db6},
129 {0x00016004, 0x6db6db40},
130 {0x00016008, 0x73f00000},
131 {0x0001600c, 0x00000000},
132 {0x00016040, 0x7f80fff8},
133 {0x0001604c, 0x76d005b5},
134 {0x00016050, 0x556cf031},
135 {0x00016054, 0x13449440},
136 {0x00016058, 0x0c51c92c},
137 {0x0001605c, 0x3db7fffc},
138 {0x00016060, 0xfffffffc},
139 {0x00016064, 0x000f0278},
140 {0x0001606c, 0x6db60000},
141 {0x00016080, 0x00000000},
142 {0x00016084, 0x0e48048c},
143 {0x00016088, 0x54214514},
144 {0x0001608c, 0x119f481e},
145 {0x00016090, 0x24926490},
146 {0x00016098, 0xd2888888},
147 {0x000160a0, 0x0a108ffe},
148 {0x000160a4, 0x812fc370},
149 {0x000160a8, 0x423c8000},
150 {0x000160b4, 0x92480080},
151 {0x000160c0, 0x00adb6d0},
152 {0x000160c4, 0x6db6db60},
153 {0x000160c8, 0x6db6db6c},
154 {0x000160cc, 0x01e6c000},
155 {0x00016100, 0x3fffbe01},
156 {0x00016104, 0xfff80000},
157 {0x00016108, 0x00080010},
158 {0x00016144, 0x02084080},
159 {0x00016148, 0x00000000},
160 {0x00016280, 0x058a0001},
161 {0x00016284, 0x3d840208},
162 {0x00016288, 0x05a20408},
163 {0x0001628c, 0x00038c07},
164 {0x00016290, 0x40000004},
165 {0x00016294, 0x458aa14f},
166 {0x00016380, 0x00000000},
167 {0x00016384, 0x00000000},
168 {0x00016388, 0x00800700},
169 {0x0001638c, 0x00800700},
170 {0x00016390, 0x00800700},
171 {0x00016394, 0x00000000},
172 {0x00016398, 0x00000000},
173 {0x0001639c, 0x00000000},
174 {0x000163a0, 0x00000001},
175 {0x000163a4, 0x00000001},
176 {0x000163a8, 0x00000000},
177 {0x000163ac, 0x00000000},
178 {0x000163b0, 0x00000000},
179 {0x000163b4, 0x00000000},
180 {0x000163b8, 0x00000000},
181 {0x000163bc, 0x00000000},
182 {0x000163c0, 0x000000a0},
183 {0x000163c4, 0x000c0000},
184 {0x000163c8, 0x14021402},
185 {0x000163cc, 0x00001402},
186 {0x000163d0, 0x00000000},
187 {0x000163d4, 0x00000000},
188 {0x00016400, 0x36db6db6},
189 {0x00016404, 0x6db6db40},
190 {0x00016408, 0x73f00000},
191 {0x0001640c, 0x00000000},
192 {0x00016440, 0x7f80fff8},
193 {0x0001644c, 0x76d005b5},
194 {0x00016450, 0x556cf031},
195 {0x00016454, 0x13449440},
196 {0x00016458, 0x0c51c92c},
197 {0x0001645c, 0x3db7fffc},
198 {0x00016460, 0xfffffffc},
199 {0x00016464, 0x000f0278},
200 {0x0001646c, 0x6db60000},
201 {0x00016500, 0x3fffbe01},
202 {0x00016504, 0xfff80000},
203 {0x00016508, 0x00080010},
204 {0x00016544, 0x02084080},
205 {0x00016548, 0x00000000},
206 {0x00016780, 0x00000000},
207 {0x00016784, 0x00000000},
208 {0x00016788, 0x00800700},
209 {0x0001678c, 0x00800700},
210 {0x00016790, 0x00800700},
211 {0x00016794, 0x00000000},
212 {0x00016798, 0x00000000},
213 {0x0001679c, 0x00000000},
214 {0x000167a0, 0x00000001},
215 {0x000167a4, 0x00000001},
216 {0x000167a8, 0x00000000},
217 {0x000167ac, 0x00000000},
218 {0x000167b0, 0x00000000},
219 {0x000167b4, 0x00000000},
220 {0x000167b8, 0x00000000},
221 {0x000167bc, 0x00000000},
222 {0x000167c0, 0x000000a0},
223 {0x000167c4, 0x000c0000},
224 {0x000167c8, 0x14021402},
225 {0x000167cc, 0x00001402},
226 {0x000167d0, 0x00000000},
227 {0x000167d4, 0x00000000},
228 {0x00016800, 0x36db6db6},
229 {0x00016804, 0x6db6db40},
230 {0x00016808, 0x73f00000},
231 {0x0001680c, 0x00000000},
232 {0x00016840, 0x7f80fff8},
233 {0x0001684c, 0x76d005b5},
234 {0x00016850, 0x556cf031},
235 {0x00016854, 0x13449440},
236 {0x00016858, 0x0c51c92c},
237 {0x0001685c, 0x3db7fffc},
238 {0x00016860, 0xfffffffc},
239 {0x00016864, 0x000f0278},
240 {0x0001686c, 0x6db60000},
241 {0x00016900, 0x3fffbe01},
242 {0x00016904, 0xfff80000},
243 {0x00016908, 0x00080010},
244 {0x00016944, 0x02084080},
245 {0x00016948, 0x00000000},
246 {0x00016b80, 0x00000000},
247 {0x00016b84, 0x00000000},
248 {0x00016b88, 0x00800700},
249 {0x00016b8c, 0x00800700},
250 {0x00016b90, 0x00800700},
251 {0x00016b94, 0x00000000},
252 {0x00016b98, 0x00000000},
253 {0x00016b9c, 0x00000000},
254 {0x00016ba0, 0x00000001},
255 {0x00016ba4, 0x00000001},
256 {0x00016ba8, 0x00000000},
257 {0x00016bac, 0x00000000},
258 {0x00016bb0, 0x00000000},
259 {0x00016bb4, 0x00000000},
260 {0x00016bb8, 0x00000000},
261 {0x00016bbc, 0x00000000},
262 {0x00016bc0, 0x000000a0},
263 {0x00016bc4, 0x000c0000},
264 {0x00016bc8, 0x14021402},
265 {0x00016bcc, 0x00001402},
266 {0x00016bd0, 0x00000000},
267 {0x00016bd4, 0x00000000},
268};
269
270static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
271 /* Addr allmodes */
272 {0x0000a000, 0x02000101},
273 {0x0000a004, 0x02000102},
274 {0x0000a008, 0x02000103},
275 {0x0000a00c, 0x02000104},
276 {0x0000a010, 0x02000200},
277 {0x0000a014, 0x02000201},
278 {0x0000a018, 0x02000202},
279 {0x0000a01c, 0x02000203},
280 {0x0000a020, 0x02000204},
281 {0x0000a024, 0x02000205},
282 {0x0000a028, 0x02000208},
283 {0x0000a02c, 0x02000302},
284 {0x0000a030, 0x02000303},
285 {0x0000a034, 0x02000304},
286 {0x0000a038, 0x02000400},
287 {0x0000a03c, 0x02010300},
288 {0x0000a040, 0x02010301},
289 {0x0000a044, 0x02010302},
290 {0x0000a048, 0x02000500},
291 {0x0000a04c, 0x02010400},
292 {0x0000a050, 0x02020300},
293 {0x0000a054, 0x02020301},
294 {0x0000a058, 0x02020302},
295 {0x0000a05c, 0x02020303},
296 {0x0000a060, 0x02020400},
297 {0x0000a064, 0x02030300},
298 {0x0000a068, 0x02030301},
299 {0x0000a06c, 0x02030302},
300 {0x0000a070, 0x02030303},
301 {0x0000a074, 0x02030400},
302 {0x0000a078, 0x02040300},
303 {0x0000a07c, 0x02040301},
304 {0x0000a080, 0x02040302},
305 {0x0000a084, 0x02040303},
306 {0x0000a088, 0x02030500},
307 {0x0000a08c, 0x02040400},
308 {0x0000a090, 0x02050203},
309 {0x0000a094, 0x02050204},
310 {0x0000a098, 0x02050205},
311 {0x0000a09c, 0x02040500},
312 {0x0000a0a0, 0x02050301},
313 {0x0000a0a4, 0x02050302},
314 {0x0000a0a8, 0x02050303},
315 {0x0000a0ac, 0x02050400},
316 {0x0000a0b0, 0x02050401},
317 {0x0000a0b4, 0x02050402},
318 {0x0000a0b8, 0x02050403},
319 {0x0000a0bc, 0x02050500},
320 {0x0000a0c0, 0x02050501},
321 {0x0000a0c4, 0x02050502},
322 {0x0000a0c8, 0x02050503},
323 {0x0000a0cc, 0x02050504},
324 {0x0000a0d0, 0x02050600},
325 {0x0000a0d4, 0x02050601},
326 {0x0000a0d8, 0x02050602},
327 {0x0000a0dc, 0x02050603},
328 {0x0000a0e0, 0x02050604},
329 {0x0000a0e4, 0x02050700},
330 {0x0000a0e8, 0x02050701},
331 {0x0000a0ec, 0x02050702},
332 {0x0000a0f0, 0x02050703},
333 {0x0000a0f4, 0x02050704},
334 {0x0000a0f8, 0x02050705},
335 {0x0000a0fc, 0x02050708},
336 {0x0000a100, 0x02050709},
337 {0x0000a104, 0x0205070a},
338 {0x0000a108, 0x0205070b},
339 {0x0000a10c, 0x0205070c},
340 {0x0000a110, 0x0205070d},
341 {0x0000a114, 0x02050710},
342 {0x0000a118, 0x02050711},
343 {0x0000a11c, 0x02050712},
344 {0x0000a120, 0x02050713},
345 {0x0000a124, 0x02050714},
346 {0x0000a128, 0x02050715},
347 {0x0000a12c, 0x02050730},
348 {0x0000a130, 0x02050731},
349 {0x0000a134, 0x02050732},
350 {0x0000a138, 0x02050733},
351 {0x0000a13c, 0x02050734},
352 {0x0000a140, 0x02050735},
353 {0x0000a144, 0x02050750},
354 {0x0000a148, 0x02050751},
355 {0x0000a14c, 0x02050752},
356 {0x0000a150, 0x02050753},
357 {0x0000a154, 0x02050754},
358 {0x0000a158, 0x02050755},
359 {0x0000a15c, 0x02050770},
360 {0x0000a160, 0x02050771},
361 {0x0000a164, 0x02050772},
362 {0x0000a168, 0x02050773},
363 {0x0000a16c, 0x02050774},
364 {0x0000a170, 0x02050775},
365 {0x0000a174, 0x00000776},
366 {0x0000a178, 0x00000776},
367 {0x0000a17c, 0x00000776},
368 {0x0000a180, 0x00000776},
369 {0x0000a184, 0x00000776},
370 {0x0000a188, 0x00000776},
371 {0x0000a18c, 0x00000776},
372 {0x0000a190, 0x00000776},
373 {0x0000a194, 0x00000776},
374 {0x0000a198, 0x00000776},
375 {0x0000a19c, 0x00000776},
376 {0x0000a1a0, 0x00000776},
377 {0x0000a1a4, 0x00000776},
378 {0x0000a1a8, 0x00000776},
379 {0x0000a1ac, 0x00000776},
380 {0x0000a1b0, 0x00000776},
381 {0x0000a1b4, 0x00000776},
382 {0x0000a1b8, 0x00000776},
383 {0x0000a1bc, 0x00000776},
384 {0x0000a1c0, 0x00000776},
385 {0x0000a1c4, 0x00000776},
386 {0x0000a1c8, 0x00000776},
387 {0x0000a1cc, 0x00000776},
388 {0x0000a1d0, 0x00000776},
389 {0x0000a1d4, 0x00000776},
390 {0x0000a1d8, 0x00000776},
391 {0x0000a1dc, 0x00000776},
392 {0x0000a1e0, 0x00000776},
393 {0x0000a1e4, 0x00000776},
394 {0x0000a1e8, 0x00000776},
395 {0x0000a1ec, 0x00000776},
396 {0x0000a1f0, 0x00000776},
397 {0x0000a1f4, 0x00000776},
398 {0x0000a1f8, 0x00000776},
399 {0x0000a1fc, 0x00000776},
400 {0x0000b000, 0x02000101},
401 {0x0000b004, 0x02000102},
402 {0x0000b008, 0x02000103},
403 {0x0000b00c, 0x02000104},
404 {0x0000b010, 0x02000200},
405 {0x0000b014, 0x02000201},
406 {0x0000b018, 0x02000202},
407 {0x0000b01c, 0x02000203},
408 {0x0000b020, 0x02000204},
409 {0x0000b024, 0x02000205},
410 {0x0000b028, 0x02000208},
411 {0x0000b02c, 0x02000302},
412 {0x0000b030, 0x02000303},
413 {0x0000b034, 0x02000304},
414 {0x0000b038, 0x02000400},
415 {0x0000b03c, 0x02010300},
416 {0x0000b040, 0x02010301},
417 {0x0000b044, 0x02010302},
418 {0x0000b048, 0x02000500},
419 {0x0000b04c, 0x02010400},
420 {0x0000b050, 0x02020300},
421 {0x0000b054, 0x02020301},
422 {0x0000b058, 0x02020302},
423 {0x0000b05c, 0x02020303},
424 {0x0000b060, 0x02020400},
425 {0x0000b064, 0x02030300},
426 {0x0000b068, 0x02030301},
427 {0x0000b06c, 0x02030302},
428 {0x0000b070, 0x02030303},
429 {0x0000b074, 0x02030400},
430 {0x0000b078, 0x02040300},
431 {0x0000b07c, 0x02040301},
432 {0x0000b080, 0x02040302},
433 {0x0000b084, 0x02040303},
434 {0x0000b088, 0x02030500},
435 {0x0000b08c, 0x02040400},
436 {0x0000b090, 0x02050203},
437 {0x0000b094, 0x02050204},
438 {0x0000b098, 0x02050205},
439 {0x0000b09c, 0x02040500},
440 {0x0000b0a0, 0x02050301},
441 {0x0000b0a4, 0x02050302},
442 {0x0000b0a8, 0x02050303},
443 {0x0000b0ac, 0x02050400},
444 {0x0000b0b0, 0x02050401},
445 {0x0000b0b4, 0x02050402},
446 {0x0000b0b8, 0x02050403},
447 {0x0000b0bc, 0x02050500},
448 {0x0000b0c0, 0x02050501},
449 {0x0000b0c4, 0x02050502},
450 {0x0000b0c8, 0x02050503},
451 {0x0000b0cc, 0x02050504},
452 {0x0000b0d0, 0x02050600},
453 {0x0000b0d4, 0x02050601},
454 {0x0000b0d8, 0x02050602},
455 {0x0000b0dc, 0x02050603},
456 {0x0000b0e0, 0x02050604},
457 {0x0000b0e4, 0x02050700},
458 {0x0000b0e8, 0x02050701},
459 {0x0000b0ec, 0x02050702},
460 {0x0000b0f0, 0x02050703},
461 {0x0000b0f4, 0x02050704},
462 {0x0000b0f8, 0x02050705},
463 {0x0000b0fc, 0x02050708},
464 {0x0000b100, 0x02050709},
465 {0x0000b104, 0x0205070a},
466 {0x0000b108, 0x0205070b},
467 {0x0000b10c, 0x0205070c},
468 {0x0000b110, 0x0205070d},
469 {0x0000b114, 0x02050710},
470 {0x0000b118, 0x02050711},
471 {0x0000b11c, 0x02050712},
472 {0x0000b120, 0x02050713},
473 {0x0000b124, 0x02050714},
474 {0x0000b128, 0x02050715},
475 {0x0000b12c, 0x02050730},
476 {0x0000b130, 0x02050731},
477 {0x0000b134, 0x02050732},
478 {0x0000b138, 0x02050733},
479 {0x0000b13c, 0x02050734},
480 {0x0000b140, 0x02050735},
481 {0x0000b144, 0x02050750},
482 {0x0000b148, 0x02050751},
483 {0x0000b14c, 0x02050752},
484 {0x0000b150, 0x02050753},
485 {0x0000b154, 0x02050754},
486 {0x0000b158, 0x02050755},
487 {0x0000b15c, 0x02050770},
488 {0x0000b160, 0x02050771},
489 {0x0000b164, 0x02050772},
490 {0x0000b168, 0x02050773},
491 {0x0000b16c, 0x02050774},
492 {0x0000b170, 0x02050775},
493 {0x0000b174, 0x00000776},
494 {0x0000b178, 0x00000776},
495 {0x0000b17c, 0x00000776},
496 {0x0000b180, 0x00000776},
497 {0x0000b184, 0x00000776},
498 {0x0000b188, 0x00000776},
499 {0x0000b18c, 0x00000776},
500 {0x0000b190, 0x00000776},
501 {0x0000b194, 0x00000776},
502 {0x0000b198, 0x00000776},
503 {0x0000b19c, 0x00000776},
504 {0x0000b1a0, 0x00000776},
505 {0x0000b1a4, 0x00000776},
506 {0x0000b1a8, 0x00000776},
507 {0x0000b1ac, 0x00000776},
508 {0x0000b1b0, 0x00000776},
509 {0x0000b1b4, 0x00000776},
510 {0x0000b1b8, 0x00000776},
511 {0x0000b1bc, 0x00000776},
512 {0x0000b1c0, 0x00000776},
513 {0x0000b1c4, 0x00000776},
514 {0x0000b1c8, 0x00000776},
515 {0x0000b1cc, 0x00000776},
516 {0x0000b1d0, 0x00000776},
517 {0x0000b1d4, 0x00000776},
518 {0x0000b1d8, 0x00000776},
519 {0x0000b1dc, 0x00000776},
520 {0x0000b1e0, 0x00000776},
521 {0x0000b1e4, 0x00000776},
522 {0x0000b1e8, 0x00000776},
523 {0x0000b1ec, 0x00000776},
524 {0x0000b1f0, 0x00000776},
525 {0x0000b1f4, 0x00000776},
526 {0x0000b1f8, 0x00000776},
527 {0x0000b1fc, 0x00000776},
528};
529
530static const u32 ar9300_2p0_mac_postamble[][5] = {
531 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
532 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
533 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
534 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
535 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
536 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
537 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
538 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
539 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
540};
541
542static const u32 ar9300_2p0_soc_postamble[][5] = {
543 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
544 {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
545};
546
547static const u32 ar9200_merlin_2p0_radio_core[][2] = {
548 /* Addr allmodes */
549 {0x00007800, 0x00040000},
550 {0x00007804, 0xdb005012},
551 {0x00007808, 0x04924914},
552 {0x0000780c, 0x21084210},
553 {0x00007810, 0x6d801300},
554 {0x00007814, 0x0019beff},
555 {0x00007818, 0x07e41000},
556 {0x0000781c, 0x00392000},
557 {0x00007820, 0x92592480},
558 {0x00007824, 0x00040000},
559 {0x00007828, 0xdb005012},
560 {0x0000782c, 0x04924914},
561 {0x00007830, 0x21084210},
562 {0x00007834, 0x6d801300},
563 {0x00007838, 0x0019beff},
564 {0x0000783c, 0x07e40000},
565 {0x00007840, 0x00392000},
566 {0x00007844, 0x92592480},
567 {0x00007848, 0x00100000},
568 {0x0000784c, 0x773f0567},
569 {0x00007850, 0x54214514},
570 {0x00007854, 0x12035828},
571 {0x00007858, 0x92592692},
572 {0x0000785c, 0x00000000},
573 {0x00007860, 0x56400000},
574 {0x00007864, 0x0a8e370e},
575 {0x00007868, 0xc0102850},
576 {0x0000786c, 0x812d4000},
577 {0x00007870, 0x807ec400},
578 {0x00007874, 0x001b6db0},
579 {0x00007878, 0x00376b63},
580 {0x0000787c, 0x06db6db6},
581 {0x00007880, 0x006d8000},
582 {0x00007884, 0xffeffffe},
583 {0x00007888, 0xffeffffe},
584 {0x0000788c, 0x00010000},
585 {0x00007890, 0x02060aeb},
586 {0x00007894, 0x5a108000},
587};
588
589static const u32 ar9300_2p0_baseband_postamble[][5] = {
590 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
591 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
592 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
593 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
594 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
595 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
596 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
597 {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
598 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
599 {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
600 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
601 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
602 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
603 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
604 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
605 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
606 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
607 {0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
608 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
609 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
610 {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
611 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
612 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
613 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
614 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
615 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
616 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
617 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
618 {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
619 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
620 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
621 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
622 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
623 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
624 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
625 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
626 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
627 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
628 {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
629 {0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
630 {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
631 {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
632 {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
633 {0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
634 {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
635 {0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
636 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
637 {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
638 {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
639 {0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
640};
641
642static const u32 ar9300_2p0_baseband_core[][2] = {
643 /* Addr allmodes */
644 {0x00009800, 0xafe68e30},
645 {0x00009804, 0xfd14e000},
646 {0x00009808, 0x9c0a9f6b},
647 {0x0000980c, 0x04900000},
648 {0x00009814, 0x9280c00a},
649 {0x00009818, 0x00000000},
650 {0x0000981c, 0x00020028},
651 {0x00009834, 0x5f3ca3de},
652 {0x00009838, 0x0108ecff},
653 {0x0000983c, 0x14750600},
654 {0x00009880, 0x201fff00},
655 {0x00009884, 0x00001042},
656 {0x000098a4, 0x00200400},
657 {0x000098b0, 0x52440bbe},
658 {0x000098d0, 0x004b6a8e},
659 {0x000098d4, 0x00000820},
660 {0x000098dc, 0x00000000},
661 {0x000098f0, 0x00000000},
662 {0x000098f4, 0x00000000},
663 {0x00009c04, 0xff55ff55},
664 {0x00009c08, 0x0320ff55},
665 {0x00009c0c, 0x00000000},
666 {0x00009c10, 0x00000000},
667 {0x00009c14, 0x00046384},
668 {0x00009c18, 0x05b6b440},
669 {0x00009c1c, 0x00b6b440},
670 {0x00009d00, 0xc080a333},
671 {0x00009d04, 0x40206c10},
672 {0x00009d08, 0x009c4060},
673 {0x00009d0c, 0x9883800a},
674 {0x00009d10, 0x01834061},
675 {0x00009d14, 0x00c0040b},
676 {0x00009d18, 0x00000000},
677 {0x00009e08, 0x0038230c},
678 {0x00009e24, 0x990bb515},
679 {0x00009e28, 0x0c6f0000},
680 {0x00009e30, 0x06336f77},
681 {0x00009e34, 0x6af6532f},
682 {0x00009e38, 0x0cc80c00},
683 {0x00009e3c, 0xcf946222},
684 {0x00009e40, 0x0d261820},
685 {0x00009e4c, 0x00001004},
686 {0x00009e50, 0x00ff03f1},
687 {0x00009e54, 0x00000000},
688 {0x00009fc0, 0x803e4788},
689 {0x00009fc4, 0x0001efb5},
690 {0x00009fcc, 0x40000014},
691 {0x00009fd0, 0x01193b93},
692 {0x0000a20c, 0x00000000},
693 {0x0000a220, 0x00000000},
694 {0x0000a224, 0x00000000},
695 {0x0000a228, 0x10002310},
696 {0x0000a22c, 0x01036a1e},
697 {0x0000a234, 0x10000fff},
698 {0x0000a23c, 0x00000000},
699 {0x0000a244, 0x0c000000},
700 {0x0000a2a0, 0x00000001},
701 {0x0000a2c0, 0x00000001},
702 {0x0000a2c8, 0x00000000},
703 {0x0000a2cc, 0x18c43433},
704 {0x0000a2d4, 0x00000000},
705 {0x0000a2dc, 0x00000000},
706 {0x0000a2e0, 0x00000000},
707 {0x0000a2e4, 0x00000000},
708 {0x0000a2e8, 0x00000000},
709 {0x0000a2ec, 0x00000000},
710 {0x0000a2f0, 0x00000000},
711 {0x0000a2f4, 0x00000000},
712 {0x0000a2f8, 0x00000000},
713 {0x0000a344, 0x00000000},
714 {0x0000a34c, 0x00000000},
715 {0x0000a350, 0x0000a000},
716 {0x0000a364, 0x00000000},
717 {0x0000a370, 0x00000000},
718 {0x0000a390, 0x00000001},
719 {0x0000a394, 0x00000444},
720 {0x0000a398, 0x001f0e0f},
721 {0x0000a39c, 0x0075393f},
722 {0x0000a3a0, 0xb79f6427},
723 {0x0000a3a4, 0x00000000},
724 {0x0000a3a8, 0xaaaaaaaa},
725 {0x0000a3ac, 0x3c466478},
726 {0x0000a3c0, 0x20202020},
727 {0x0000a3c4, 0x22222220},
728 {0x0000a3c8, 0x20200020},
729 {0x0000a3cc, 0x20202020},
730 {0x0000a3d0, 0x20202020},
731 {0x0000a3d4, 0x20202020},
732 {0x0000a3d8, 0x20202020},
733 {0x0000a3dc, 0x20202020},
734 {0x0000a3e0, 0x20202020},
735 {0x0000a3e4, 0x20202020},
736 {0x0000a3e8, 0x20202020},
737 {0x0000a3ec, 0x20202020},
738 {0x0000a3f0, 0x00000000},
739 {0x0000a3f4, 0x00000246},
740 {0x0000a3f8, 0x0cdbd380},
741 {0x0000a3fc, 0x000f0f01},
742 {0x0000a400, 0x8fa91f01},
743 {0x0000a404, 0x00000000},
744 {0x0000a408, 0x0e79e5c6},
745 {0x0000a40c, 0x00820820},
746 {0x0000a414, 0x1ce739ce},
747 {0x0000a418, 0x2d001dce},
748 {0x0000a41c, 0x1ce739ce},
749 {0x0000a420, 0x000001ce},
750 {0x0000a424, 0x1ce739ce},
751 {0x0000a428, 0x000001ce},
752 {0x0000a42c, 0x1ce739ce},
753 {0x0000a430, 0x1ce739ce},
754 {0x0000a434, 0x00000000},
755 {0x0000a438, 0x00001801},
756 {0x0000a43c, 0x00000000},
757 {0x0000a440, 0x00000000},
758 {0x0000a444, 0x00000000},
759 {0x0000a448, 0x04000080},
760 {0x0000a44c, 0x00000001},
761 {0x0000a450, 0x00010000},
762 {0x0000a458, 0x00000000},
763 {0x0000a600, 0x00000000},
764 {0x0000a604, 0x00000000},
765 {0x0000a608, 0x00000000},
766 {0x0000a60c, 0x00000000},
767 {0x0000a610, 0x00000000},
768 {0x0000a614, 0x00000000},
769 {0x0000a618, 0x00000000},
770 {0x0000a61c, 0x00000000},
771 {0x0000a620, 0x00000000},
772 {0x0000a624, 0x00000000},
773 {0x0000a628, 0x00000000},
774 {0x0000a62c, 0x00000000},
775 {0x0000a630, 0x00000000},
776 {0x0000a634, 0x00000000},
777 {0x0000a638, 0x00000000},
778 {0x0000a63c, 0x00000000},
779 {0x0000a640, 0x00000000},
780 {0x0000a644, 0x3fad9d74},
781 {0x0000a648, 0x0048060a},
782 {0x0000a64c, 0x00000637},
783 {0x0000a670, 0x03020100},
784 {0x0000a674, 0x09080504},
785 {0x0000a678, 0x0d0c0b0a},
786 {0x0000a67c, 0x13121110},
787 {0x0000a680, 0x31301514},
788 {0x0000a684, 0x35343332},
789 {0x0000a688, 0x00000036},
790 {0x0000a690, 0x00000838},
791 {0x0000a7c0, 0x00000000},
792 {0x0000a7c4, 0xfffffffc},
793 {0x0000a7c8, 0x00000000},
794 {0x0000a7cc, 0x00000000},
795 {0x0000a7d0, 0x00000000},
796 {0x0000a7d4, 0x00000004},
797 {0x0000a7dc, 0x00000001},
798 {0x0000a8d0, 0x004b6a8e},
799 {0x0000a8d4, 0x00000820},
800 {0x0000a8dc, 0x00000000},
801 {0x0000a8f0, 0x00000000},
802 {0x0000a8f4, 0x00000000},
803 {0x0000b2d0, 0x00000080},
804 {0x0000b2d4, 0x00000000},
805 {0x0000b2dc, 0x00000000},
806 {0x0000b2e0, 0x00000000},
807 {0x0000b2e4, 0x00000000},
808 {0x0000b2e8, 0x00000000},
809 {0x0000b2ec, 0x00000000},
810 {0x0000b2f0, 0x00000000},
811 {0x0000b2f4, 0x00000000},
812 {0x0000b2f8, 0x00000000},
813 {0x0000b408, 0x0e79e5c0},
814 {0x0000b40c, 0x00820820},
815 {0x0000b420, 0x00000000},
816 {0x0000b8d0, 0x004b6a8e},
817 {0x0000b8d4, 0x00000820},
818 {0x0000b8dc, 0x00000000},
819 {0x0000b8f0, 0x00000000},
820 {0x0000b8f4, 0x00000000},
821 {0x0000c2d0, 0x00000080},
822 {0x0000c2d4, 0x00000000},
823 {0x0000c2dc, 0x00000000},
824 {0x0000c2e0, 0x00000000},
825 {0x0000c2e4, 0x00000000},
826 {0x0000c2e8, 0x00000000},
827 {0x0000c2ec, 0x00000000},
828 {0x0000c2f0, 0x00000000},
829 {0x0000c2f4, 0x00000000},
830 {0x0000c2f8, 0x00000000},
831 {0x0000c408, 0x0e79e5c0},
832 {0x0000c40c, 0x00820820},
833 {0x0000c420, 0x00000000},
834};
835
836static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
837 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
838 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
839 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
840 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
841 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
842 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
843 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
844 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
845 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
846 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
847 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
848 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
849 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
850 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
851 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
852 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
853 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
854 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
855 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
856 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
857 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
858 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
859 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
860 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
861 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
862 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
863 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
864 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
865 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
866 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
867 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
868 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
869 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
870 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
871 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
872 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
873 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
874 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
875 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
876 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
877 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
878 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
879 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
880 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
881 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
882 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
883 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
884 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
885 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
886 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
887 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
888 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
889 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
890 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
891 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
892 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
893 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
894 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
895 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
896 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
897 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
898 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
899 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
900 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
901 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
902 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
903 {0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
904 {0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
905 {0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
906 {0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
907 {0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
908 {0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
909 {0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
910 {0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
911 {0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
912};
913
914static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
915 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
916 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
917 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
918 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
919 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
920 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
921 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
922 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
923 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
924 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
925 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
926 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
927 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
928 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
929 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
930 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
931 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
932 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
933 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
934 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
935 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
936 {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
937 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
938 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
939 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
940 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
941 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
942 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
943 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
944 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
945 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
946 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
947 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
948 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
949 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
950 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
951 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
952 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
953 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
954 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
955 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
956 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
957 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
958 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
959 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
960 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
961 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
962 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
963 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
964 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
965 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
966 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
967 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
968 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
969 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
970 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
971 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
972 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
973 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
974 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
975 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
976 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
977 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
978 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
979 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
980 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
981 {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
982 {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
983 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
984 {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
985 {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
986 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
987 {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
988 {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
989 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
990};
991
992static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
993 /* Addr allmodes */
994 {0x0000a000, 0x00010000},
995 {0x0000a004, 0x00030002},
996 {0x0000a008, 0x00050004},
997 {0x0000a00c, 0x00810080},
998 {0x0000a010, 0x00830082},
999 {0x0000a014, 0x01810180},
1000 {0x0000a018, 0x01830182},
1001 {0x0000a01c, 0x01850184},
1002 {0x0000a020, 0x01890188},
1003 {0x0000a024, 0x018b018a},
1004 {0x0000a028, 0x018d018c},
1005 {0x0000a02c, 0x01910190},
1006 {0x0000a030, 0x01930192},
1007 {0x0000a034, 0x01950194},
1008 {0x0000a038, 0x038a0196},
1009 {0x0000a03c, 0x038c038b},
1010 {0x0000a040, 0x0390038d},
1011 {0x0000a044, 0x03920391},
1012 {0x0000a048, 0x03940393},
1013 {0x0000a04c, 0x03960395},
1014 {0x0000a050, 0x00000000},
1015 {0x0000a054, 0x00000000},
1016 {0x0000a058, 0x00000000},
1017 {0x0000a05c, 0x00000000},
1018 {0x0000a060, 0x00000000},
1019 {0x0000a064, 0x00000000},
1020 {0x0000a068, 0x00000000},
1021 {0x0000a06c, 0x00000000},
1022 {0x0000a070, 0x00000000},
1023 {0x0000a074, 0x00000000},
1024 {0x0000a078, 0x00000000},
1025 {0x0000a07c, 0x00000000},
1026 {0x0000a080, 0x22222229},
1027 {0x0000a084, 0x1d1d1d1d},
1028 {0x0000a088, 0x1d1d1d1d},
1029 {0x0000a08c, 0x1d1d1d1d},
1030 {0x0000a090, 0x171d1d1d},
1031 {0x0000a094, 0x11111717},
1032 {0x0000a098, 0x00030311},
1033 {0x0000a09c, 0x00000000},
1034 {0x0000a0a0, 0x00000000},
1035 {0x0000a0a4, 0x00000000},
1036 {0x0000a0a8, 0x00000000},
1037 {0x0000a0ac, 0x00000000},
1038 {0x0000a0b0, 0x00000000},
1039 {0x0000a0b4, 0x00000000},
1040 {0x0000a0b8, 0x00000000},
1041 {0x0000a0bc, 0x00000000},
1042 {0x0000a0c0, 0x001f0000},
1043 {0x0000a0c4, 0x01000101},
1044 {0x0000a0c8, 0x011e011f},
1045 {0x0000a0cc, 0x011c011d},
1046 {0x0000a0d0, 0x02030204},
1047 {0x0000a0d4, 0x02010202},
1048 {0x0000a0d8, 0x021f0200},
1049 {0x0000a0dc, 0x0302021e},
1050 {0x0000a0e0, 0x03000301},
1051 {0x0000a0e4, 0x031e031f},
1052 {0x0000a0e8, 0x0402031d},
1053 {0x0000a0ec, 0x04000401},
1054 {0x0000a0f0, 0x041e041f},
1055 {0x0000a0f4, 0x0502041d},
1056 {0x0000a0f8, 0x05000501},
1057 {0x0000a0fc, 0x051e051f},
1058 {0x0000a100, 0x06010602},
1059 {0x0000a104, 0x061f0600},
1060 {0x0000a108, 0x061d061e},
1061 {0x0000a10c, 0x07020703},
1062 {0x0000a110, 0x07000701},
1063 {0x0000a114, 0x00000000},
1064 {0x0000a118, 0x00000000},
1065 {0x0000a11c, 0x00000000},
1066 {0x0000a120, 0x00000000},
1067 {0x0000a124, 0x00000000},
1068 {0x0000a128, 0x00000000},
1069 {0x0000a12c, 0x00000000},
1070 {0x0000a130, 0x00000000},
1071 {0x0000a134, 0x00000000},
1072 {0x0000a138, 0x00000000},
1073 {0x0000a13c, 0x00000000},
1074 {0x0000a140, 0x001f0000},
1075 {0x0000a144, 0x01000101},
1076 {0x0000a148, 0x011e011f},
1077 {0x0000a14c, 0x011c011d},
1078 {0x0000a150, 0x02030204},
1079 {0x0000a154, 0x02010202},
1080 {0x0000a158, 0x021f0200},
1081 {0x0000a15c, 0x0302021e},
1082 {0x0000a160, 0x03000301},
1083 {0x0000a164, 0x031e031f},
1084 {0x0000a168, 0x0402031d},
1085 {0x0000a16c, 0x04000401},
1086 {0x0000a170, 0x041e041f},
1087 {0x0000a174, 0x0502041d},
1088 {0x0000a178, 0x05000501},
1089 {0x0000a17c, 0x051e051f},
1090 {0x0000a180, 0x06010602},
1091 {0x0000a184, 0x061f0600},
1092 {0x0000a188, 0x061d061e},
1093 {0x0000a18c, 0x07020703},
1094 {0x0000a190, 0x07000701},
1095 {0x0000a194, 0x00000000},
1096 {0x0000a198, 0x00000000},
1097 {0x0000a19c, 0x00000000},
1098 {0x0000a1a0, 0x00000000},
1099 {0x0000a1a4, 0x00000000},
1100 {0x0000a1a8, 0x00000000},
1101 {0x0000a1ac, 0x00000000},
1102 {0x0000a1b0, 0x00000000},
1103 {0x0000a1b4, 0x00000000},
1104 {0x0000a1b8, 0x00000000},
1105 {0x0000a1bc, 0x00000000},
1106 {0x0000a1c0, 0x00000000},
1107 {0x0000a1c4, 0x00000000},
1108 {0x0000a1c8, 0x00000000},
1109 {0x0000a1cc, 0x00000000},
1110 {0x0000a1d0, 0x00000000},
1111 {0x0000a1d4, 0x00000000},
1112 {0x0000a1d8, 0x00000000},
1113 {0x0000a1dc, 0x00000000},
1114 {0x0000a1e0, 0x00000000},
1115 {0x0000a1e4, 0x00000000},
1116 {0x0000a1e8, 0x00000000},
1117 {0x0000a1ec, 0x00000000},
1118 {0x0000a1f0, 0x00000396},
1119 {0x0000a1f4, 0x00000396},
1120 {0x0000a1f8, 0x00000396},
1121 {0x0000a1fc, 0x00000196},
1122 {0x0000b000, 0x00010000},
1123 {0x0000b004, 0x00030002},
1124 {0x0000b008, 0x00050004},
1125 {0x0000b00c, 0x00810080},
1126 {0x0000b010, 0x00830082},
1127 {0x0000b014, 0x01810180},
1128 {0x0000b018, 0x01830182},
1129 {0x0000b01c, 0x01850184},
1130 {0x0000b020, 0x02810280},
1131 {0x0000b024, 0x02830282},
1132 {0x0000b028, 0x02850284},
1133 {0x0000b02c, 0x02890288},
1134 {0x0000b030, 0x028b028a},
1135 {0x0000b034, 0x0388028c},
1136 {0x0000b038, 0x038a0389},
1137 {0x0000b03c, 0x038c038b},
1138 {0x0000b040, 0x0390038d},
1139 {0x0000b044, 0x03920391},
1140 {0x0000b048, 0x03940393},
1141 {0x0000b04c, 0x03960395},
1142 {0x0000b050, 0x00000000},
1143 {0x0000b054, 0x00000000},
1144 {0x0000b058, 0x00000000},
1145 {0x0000b05c, 0x00000000},
1146 {0x0000b060, 0x00000000},
1147 {0x0000b064, 0x00000000},
1148 {0x0000b068, 0x00000000},
1149 {0x0000b06c, 0x00000000},
1150 {0x0000b070, 0x00000000},
1151 {0x0000b074, 0x00000000},
1152 {0x0000b078, 0x00000000},
1153 {0x0000b07c, 0x00000000},
1154 {0x0000b080, 0x32323232},
1155 {0x0000b084, 0x2f2f3232},
1156 {0x0000b088, 0x23282a2d},
1157 {0x0000b08c, 0x1c1e2123},
1158 {0x0000b090, 0x14171919},
1159 {0x0000b094, 0x0e0e1214},
1160 {0x0000b098, 0x03050707},
1161 {0x0000b09c, 0x00030303},
1162 {0x0000b0a0, 0x00000000},
1163 {0x0000b0a4, 0x00000000},
1164 {0x0000b0a8, 0x00000000},
1165 {0x0000b0ac, 0x00000000},
1166 {0x0000b0b0, 0x00000000},
1167 {0x0000b0b4, 0x00000000},
1168 {0x0000b0b8, 0x00000000},
1169 {0x0000b0bc, 0x00000000},
1170 {0x0000b0c0, 0x003f0020},
1171 {0x0000b0c4, 0x00400041},
1172 {0x0000b0c8, 0x0140005f},
1173 {0x0000b0cc, 0x0160015f},
1174 {0x0000b0d0, 0x017e017f},
1175 {0x0000b0d4, 0x02410242},
1176 {0x0000b0d8, 0x025f0240},
1177 {0x0000b0dc, 0x027f0260},
1178 {0x0000b0e0, 0x0341027e},
1179 {0x0000b0e4, 0x035f0340},
1180 {0x0000b0e8, 0x037f0360},
1181 {0x0000b0ec, 0x04400441},
1182 {0x0000b0f0, 0x0460045f},
1183 {0x0000b0f4, 0x0541047f},
1184 {0x0000b0f8, 0x055f0540},
1185 {0x0000b0fc, 0x057f0560},
1186 {0x0000b100, 0x06400641},
1187 {0x0000b104, 0x0660065f},
1188 {0x0000b108, 0x067e067f},
1189 {0x0000b10c, 0x07410742},
1190 {0x0000b110, 0x075f0740},
1191 {0x0000b114, 0x077f0760},
1192 {0x0000b118, 0x07800781},
1193 {0x0000b11c, 0x07a0079f},
1194 {0x0000b120, 0x07c107bf},
1195 {0x0000b124, 0x000007c0},
1196 {0x0000b128, 0x00000000},
1197 {0x0000b12c, 0x00000000},
1198 {0x0000b130, 0x00000000},
1199 {0x0000b134, 0x00000000},
1200 {0x0000b138, 0x00000000},
1201 {0x0000b13c, 0x00000000},
1202 {0x0000b140, 0x003f0020},
1203 {0x0000b144, 0x00400041},
1204 {0x0000b148, 0x0140005f},
1205 {0x0000b14c, 0x0160015f},
1206 {0x0000b150, 0x017e017f},
1207 {0x0000b154, 0x02410242},
1208 {0x0000b158, 0x025f0240},
1209 {0x0000b15c, 0x027f0260},
1210 {0x0000b160, 0x0341027e},
1211 {0x0000b164, 0x035f0340},
1212 {0x0000b168, 0x037f0360},
1213 {0x0000b16c, 0x04400441},
1214 {0x0000b170, 0x0460045f},
1215 {0x0000b174, 0x0541047f},
1216 {0x0000b178, 0x055f0540},
1217 {0x0000b17c, 0x057f0560},
1218 {0x0000b180, 0x06400641},
1219 {0x0000b184, 0x0660065f},
1220 {0x0000b188, 0x067e067f},
1221 {0x0000b18c, 0x07410742},
1222 {0x0000b190, 0x075f0740},
1223 {0x0000b194, 0x077f0760},
1224 {0x0000b198, 0x07800781},
1225 {0x0000b19c, 0x07a0079f},
1226 {0x0000b1a0, 0x07c107bf},
1227 {0x0000b1a4, 0x000007c0},
1228 {0x0000b1a8, 0x00000000},
1229 {0x0000b1ac, 0x00000000},
1230 {0x0000b1b0, 0x00000000},
1231 {0x0000b1b4, 0x00000000},
1232 {0x0000b1b8, 0x00000000},
1233 {0x0000b1bc, 0x00000000},
1234 {0x0000b1c0, 0x00000000},
1235 {0x0000b1c4, 0x00000000},
1236 {0x0000b1c8, 0x00000000},
1237 {0x0000b1cc, 0x00000000},
1238 {0x0000b1d0, 0x00000000},
1239 {0x0000b1d4, 0x00000000},
1240 {0x0000b1d8, 0x00000000},
1241 {0x0000b1dc, 0x00000000},
1242 {0x0000b1e0, 0x00000000},
1243 {0x0000b1e4, 0x00000000},
1244 {0x0000b1e8, 0x00000000},
1245 {0x0000b1ec, 0x00000000},
1246 {0x0000b1f0, 0x00000396},
1247 {0x0000b1f4, 0x00000396},
1248 {0x0000b1f8, 0x00000396},
1249 {0x0000b1fc, 0x00000196},
1250};
1251
1252static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
1253 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
1254 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
1255 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1256 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
1257 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
1258 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
1259 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
1260 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
1261 {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
1262 {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
1263 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
1264 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
1265 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
1266 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
1267 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
1268 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
1269 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
1270 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
1271 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
1272 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
1273 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
1274 {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
1275 {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
1276 {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
1277 {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
1278 {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
1279 {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
1280 {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1281 {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1282 {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1283 {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1284 {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1285 {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1286 {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
1287 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
1288 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
1289 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
1290 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
1291 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
1292 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
1293 {0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
1294 {0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
1295 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
1296 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
1297 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
1298 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
1299 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
1300 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
1301 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
1302 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
1303 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
1304 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
1305 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
1306 {0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
1307 {0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
1308 {0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
1309 {0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
1310 {0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
1311 {0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
1312 {0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1313 {0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1314 {0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1315 {0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1316 {0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1317 {0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1318 {0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
1319 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
1320 {0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
1321 {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1322 {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
1323 {0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
1324 {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1325 {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
1326 {0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
1327 {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
1328};
1329
1330static const u32 ar9300_2p0_mac_core[][2] = {
1331 /* Addr allmodes */
1332 {0x00000008, 0x00000000},
1333 {0x00000030, 0x00020085},
1334 {0x00000034, 0x00000005},
1335 {0x00000040, 0x00000000},
1336 {0x00000044, 0x00000000},
1337 {0x00000048, 0x00000008},
1338 {0x0000004c, 0x00000010},
1339 {0x00000050, 0x00000000},
1340 {0x00001040, 0x002ffc0f},
1341 {0x00001044, 0x002ffc0f},
1342 {0x00001048, 0x002ffc0f},
1343 {0x0000104c, 0x002ffc0f},
1344 {0x00001050, 0x002ffc0f},
1345 {0x00001054, 0x002ffc0f},
1346 {0x00001058, 0x002ffc0f},
1347 {0x0000105c, 0x002ffc0f},
1348 {0x00001060, 0x002ffc0f},
1349 {0x00001064, 0x002ffc0f},
1350 {0x000010f0, 0x00000100},
1351 {0x00001270, 0x00000000},
1352 {0x000012b0, 0x00000000},
1353 {0x000012f0, 0x00000000},
1354 {0x0000143c, 0x00000000},
1355 {0x0000147c, 0x00000000},
1356 {0x00008000, 0x00000000},
1357 {0x00008004, 0x00000000},
1358 {0x00008008, 0x00000000},
1359 {0x0000800c, 0x00000000},
1360 {0x00008018, 0x00000000},
1361 {0x00008020, 0x00000000},
1362 {0x00008038, 0x00000000},
1363 {0x0000803c, 0x00000000},
1364 {0x00008040, 0x00000000},
1365 {0x00008044, 0x00000000},
1366 {0x00008048, 0x00000000},
1367 {0x0000804c, 0xffffffff},
1368 {0x00008054, 0x00000000},
1369 {0x00008058, 0x00000000},
1370 {0x0000805c, 0x000fc78f},
1371 {0x00008060, 0x0000000f},
1372 {0x00008064, 0x00000000},
1373 {0x00008070, 0x00000310},
1374 {0x00008074, 0x00000020},
1375 {0x00008078, 0x00000000},
1376 {0x0000809c, 0x0000000f},
1377 {0x000080a0, 0x00000000},
1378 {0x000080a4, 0x02ff0000},
1379 {0x000080a8, 0x0e070605},
1380 {0x000080ac, 0x0000000d},
1381 {0x000080b0, 0x00000000},
1382 {0x000080b4, 0x00000000},
1383 {0x000080b8, 0x00000000},
1384 {0x000080bc, 0x00000000},
1385 {0x000080c0, 0x2a800000},
1386 {0x000080c4, 0x06900168},
1387 {0x000080c8, 0x13881c20},
1388 {0x000080cc, 0x01f40000},
1389 {0x000080d0, 0x00252500},
1390 {0x000080d4, 0x00a00000},
1391 {0x000080d8, 0x00400000},
1392 {0x000080dc, 0x00000000},
1393 {0x000080e0, 0xffffffff},
1394 {0x000080e4, 0x0000ffff},
1395 {0x000080e8, 0x3f3f3f3f},
1396 {0x000080ec, 0x00000000},
1397 {0x000080f0, 0x00000000},
1398 {0x000080f4, 0x00000000},
1399 {0x000080fc, 0x00020000},
1400 {0x00008100, 0x00000000},
1401 {0x00008108, 0x00000052},
1402 {0x0000810c, 0x00000000},
1403 {0x00008110, 0x00000000},
1404 {0x00008114, 0x000007ff},
1405 {0x00008118, 0x000000aa},
1406 {0x0000811c, 0x00003210},
1407 {0x00008124, 0x00000000},
1408 {0x00008128, 0x00000000},
1409 {0x0000812c, 0x00000000},
1410 {0x00008130, 0x00000000},
1411 {0x00008134, 0x00000000},
1412 {0x00008138, 0x00000000},
1413 {0x0000813c, 0x0000ffff},
1414 {0x00008144, 0xffffffff},
1415 {0x00008168, 0x00000000},
1416 {0x0000816c, 0x00000000},
1417 {0x00008170, 0x18486200},
1418 {0x00008174, 0x33332210},
1419 {0x00008178, 0x00000000},
1420 {0x0000817c, 0x00020000},
1421 {0x000081c0, 0x00000000},
1422 {0x000081c4, 0x33332210},
1423 {0x000081c8, 0x00000000},
1424 {0x000081cc, 0x00000000},
1425 {0x000081d4, 0x00000000},
1426 {0x000081ec, 0x00000000},
1427 {0x000081f0, 0x00000000},
1428 {0x000081f4, 0x00000000},
1429 {0x000081f8, 0x00000000},
1430 {0x000081fc, 0x00000000},
1431 {0x00008240, 0x00100000},
1432 {0x00008244, 0x0010f424},
1433 {0x00008248, 0x00000800},
1434 {0x0000824c, 0x0001e848},
1435 {0x00008250, 0x00000000},
1436 {0x00008254, 0x00000000},
1437 {0x00008258, 0x00000000},
1438 {0x0000825c, 0x40000000},
1439 {0x00008260, 0x00080922},
1440 {0x00008264, 0x98a00010},
1441 {0x00008268, 0xffffffff},
1442 {0x0000826c, 0x0000ffff},
1443 {0x00008270, 0x00000000},
1444 {0x00008274, 0x40000000},
1445 {0x00008278, 0x003e4180},
1446 {0x0000827c, 0x00000004},
1447 {0x00008284, 0x0000002c},
1448 {0x00008288, 0x0000002c},
1449 {0x0000828c, 0x000000ff},
1450 {0x00008294, 0x00000000},
1451 {0x00008298, 0x00000000},
1452 {0x0000829c, 0x00000000},
1453 {0x00008300, 0x00000140},
1454 {0x00008314, 0x00000000},
1455 {0x0000831c, 0x0000010d},
1456 {0x00008328, 0x00000000},
1457 {0x0000832c, 0x00000007},
1458 {0x00008330, 0x00000302},
1459 {0x00008334, 0x00000700},
1460 {0x00008338, 0x00ff0000},
1461 {0x0000833c, 0x02400000},
1462 {0x00008340, 0x000107ff},
1463 {0x00008344, 0xaa48105b},
1464 {0x00008348, 0x008f0000},
1465 {0x0000835c, 0x00000000},
1466 {0x00008360, 0xffffffff},
1467 {0x00008364, 0xffffffff},
1468 {0x00008368, 0x00000000},
1469 {0x00008370, 0x00000000},
1470 {0x00008374, 0x000000ff},
1471 {0x00008378, 0x00000000},
1472 {0x0000837c, 0x00000000},
1473 {0x00008380, 0xffffffff},
1474 {0x00008384, 0xffffffff},
1475 {0x00008390, 0xffffffff},
1476 {0x00008394, 0xffffffff},
1477 {0x00008398, 0x00000000},
1478 {0x0000839c, 0x00000000},
1479 {0x000083a0, 0x00000000},
1480 {0x000083a4, 0x0000fa14},
1481 {0x000083a8, 0x000f0c00},
1482 {0x000083ac, 0x33332210},
1483 {0x000083b0, 0x33332210},
1484 {0x000083b4, 0x33332210},
1485 {0x000083b8, 0x33332210},
1486 {0x000083bc, 0x00000000},
1487 {0x000083c0, 0x00000000},
1488 {0x000083c4, 0x00000000},
1489 {0x000083c8, 0x00000000},
1490 {0x000083cc, 0x00000200},
1491 {0x000083d0, 0x000301ff},
1492};
1493
1494static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
1495 /* Addr allmodes */
1496 {0x0000a000, 0x00010000},
1497 {0x0000a004, 0x00030002},
1498 {0x0000a008, 0x00050004},
1499 {0x0000a00c, 0x00810080},
1500 {0x0000a010, 0x00830082},
1501 {0x0000a014, 0x01810180},
1502 {0x0000a018, 0x01830182},
1503 {0x0000a01c, 0x01850184},
1504 {0x0000a020, 0x01890188},
1505 {0x0000a024, 0x018b018a},
1506 {0x0000a028, 0x018d018c},
1507 {0x0000a02c, 0x03820190},
1508 {0x0000a030, 0x03840383},
1509 {0x0000a034, 0x03880385},
1510 {0x0000a038, 0x038a0389},
1511 {0x0000a03c, 0x038c038b},
1512 {0x0000a040, 0x0390038d},
1513 {0x0000a044, 0x03920391},
1514 {0x0000a048, 0x03940393},
1515 {0x0000a04c, 0x03960395},
1516 {0x0000a050, 0x00000000},
1517 {0x0000a054, 0x00000000},
1518 {0x0000a058, 0x00000000},
1519 {0x0000a05c, 0x00000000},
1520 {0x0000a060, 0x00000000},
1521 {0x0000a064, 0x00000000},
1522 {0x0000a068, 0x00000000},
1523 {0x0000a06c, 0x00000000},
1524 {0x0000a070, 0x00000000},
1525 {0x0000a074, 0x00000000},
1526 {0x0000a078, 0x00000000},
1527 {0x0000a07c, 0x00000000},
1528 {0x0000a080, 0x29292929},
1529 {0x0000a084, 0x29292929},
1530 {0x0000a088, 0x29292929},
1531 {0x0000a08c, 0x29292929},
1532 {0x0000a090, 0x22292929},
1533 {0x0000a094, 0x1d1d2222},
1534 {0x0000a098, 0x0c111117},
1535 {0x0000a09c, 0x00030303},
1536 {0x0000a0a0, 0x00000000},
1537 {0x0000a0a4, 0x00000000},
1538 {0x0000a0a8, 0x00000000},
1539 {0x0000a0ac, 0x00000000},
1540 {0x0000a0b0, 0x00000000},
1541 {0x0000a0b4, 0x00000000},
1542 {0x0000a0b8, 0x00000000},
1543 {0x0000a0bc, 0x00000000},
1544 {0x0000a0c0, 0x001f0000},
1545 {0x0000a0c4, 0x01000101},
1546 {0x0000a0c8, 0x011e011f},
1547 {0x0000a0cc, 0x011c011d},
1548 {0x0000a0d0, 0x02030204},
1549 {0x0000a0d4, 0x02010202},
1550 {0x0000a0d8, 0x021f0200},
1551 {0x0000a0dc, 0x0302021e},
1552 {0x0000a0e0, 0x03000301},
1553 {0x0000a0e4, 0x031e031f},
1554 {0x0000a0e8, 0x0402031d},
1555 {0x0000a0ec, 0x04000401},
1556 {0x0000a0f0, 0x041e041f},
1557 {0x0000a0f4, 0x0502041d},
1558 {0x0000a0f8, 0x05000501},
1559 {0x0000a0fc, 0x051e051f},
1560 {0x0000a100, 0x06010602},
1561 {0x0000a104, 0x061f0600},
1562 {0x0000a108, 0x061d061e},
1563 {0x0000a10c, 0x07020703},
1564 {0x0000a110, 0x07000701},
1565 {0x0000a114, 0x00000000},
1566 {0x0000a118, 0x00000000},
1567 {0x0000a11c, 0x00000000},
1568 {0x0000a120, 0x00000000},
1569 {0x0000a124, 0x00000000},
1570 {0x0000a128, 0x00000000},
1571 {0x0000a12c, 0x00000000},
1572 {0x0000a130, 0x00000000},
1573 {0x0000a134, 0x00000000},
1574 {0x0000a138, 0x00000000},
1575 {0x0000a13c, 0x00000000},
1576 {0x0000a140, 0x001f0000},
1577 {0x0000a144, 0x01000101},
1578 {0x0000a148, 0x011e011f},
1579 {0x0000a14c, 0x011c011d},
1580 {0x0000a150, 0x02030204},
1581 {0x0000a154, 0x02010202},
1582 {0x0000a158, 0x021f0200},
1583 {0x0000a15c, 0x0302021e},
1584 {0x0000a160, 0x03000301},
1585 {0x0000a164, 0x031e031f},
1586 {0x0000a168, 0x0402031d},
1587 {0x0000a16c, 0x04000401},
1588 {0x0000a170, 0x041e041f},
1589 {0x0000a174, 0x0502041d},
1590 {0x0000a178, 0x05000501},
1591 {0x0000a17c, 0x051e051f},
1592 {0x0000a180, 0x06010602},
1593 {0x0000a184, 0x061f0600},
1594 {0x0000a188, 0x061d061e},
1595 {0x0000a18c, 0x07020703},
1596 {0x0000a190, 0x07000701},
1597 {0x0000a194, 0x00000000},
1598 {0x0000a198, 0x00000000},
1599 {0x0000a19c, 0x00000000},
1600 {0x0000a1a0, 0x00000000},
1601 {0x0000a1a4, 0x00000000},
1602 {0x0000a1a8, 0x00000000},
1603 {0x0000a1ac, 0x00000000},
1604 {0x0000a1b0, 0x00000000},
1605 {0x0000a1b4, 0x00000000},
1606 {0x0000a1b8, 0x00000000},
1607 {0x0000a1bc, 0x00000000},
1608 {0x0000a1c0, 0x00000000},
1609 {0x0000a1c4, 0x00000000},
1610 {0x0000a1c8, 0x00000000},
1611 {0x0000a1cc, 0x00000000},
1612 {0x0000a1d0, 0x00000000},
1613 {0x0000a1d4, 0x00000000},
1614 {0x0000a1d8, 0x00000000},
1615 {0x0000a1dc, 0x00000000},
1616 {0x0000a1e0, 0x00000000},
1617 {0x0000a1e4, 0x00000000},
1618 {0x0000a1e8, 0x00000000},
1619 {0x0000a1ec, 0x00000000},
1620 {0x0000a1f0, 0x00000396},
1621 {0x0000a1f4, 0x00000396},
1622 {0x0000a1f8, 0x00000396},
1623 {0x0000a1fc, 0x00000196},
1624 {0x0000b000, 0x00010000},
1625 {0x0000b004, 0x00030002},
1626 {0x0000b008, 0x00050004},
1627 {0x0000b00c, 0x00810080},
1628 {0x0000b010, 0x00830082},
1629 {0x0000b014, 0x01810180},
1630 {0x0000b018, 0x01830182},
1631 {0x0000b01c, 0x01850184},
1632 {0x0000b020, 0x02810280},
1633 {0x0000b024, 0x02830282},
1634 {0x0000b028, 0x02850284},
1635 {0x0000b02c, 0x02890288},
1636 {0x0000b030, 0x028b028a},
1637 {0x0000b034, 0x0388028c},
1638 {0x0000b038, 0x038a0389},
1639 {0x0000b03c, 0x038c038b},
1640 {0x0000b040, 0x0390038d},
1641 {0x0000b044, 0x03920391},
1642 {0x0000b048, 0x03940393},
1643 {0x0000b04c, 0x03960395},
1644 {0x0000b050, 0x00000000},
1645 {0x0000b054, 0x00000000},
1646 {0x0000b058, 0x00000000},
1647 {0x0000b05c, 0x00000000},
1648 {0x0000b060, 0x00000000},
1649 {0x0000b064, 0x00000000},
1650 {0x0000b068, 0x00000000},
1651 {0x0000b06c, 0x00000000},
1652 {0x0000b070, 0x00000000},
1653 {0x0000b074, 0x00000000},
1654 {0x0000b078, 0x00000000},
1655 {0x0000b07c, 0x00000000},
1656 {0x0000b080, 0x32323232},
1657 {0x0000b084, 0x2f2f3232},
1658 {0x0000b088, 0x23282a2d},
1659 {0x0000b08c, 0x1c1e2123},
1660 {0x0000b090, 0x14171919},
1661 {0x0000b094, 0x0e0e1214},
1662 {0x0000b098, 0x03050707},
1663 {0x0000b09c, 0x00030303},
1664 {0x0000b0a0, 0x00000000},
1665 {0x0000b0a4, 0x00000000},
1666 {0x0000b0a8, 0x00000000},
1667 {0x0000b0ac, 0x00000000},
1668 {0x0000b0b0, 0x00000000},
1669 {0x0000b0b4, 0x00000000},
1670 {0x0000b0b8, 0x00000000},
1671 {0x0000b0bc, 0x00000000},
1672 {0x0000b0c0, 0x003f0020},
1673 {0x0000b0c4, 0x00400041},
1674 {0x0000b0c8, 0x0140005f},
1675 {0x0000b0cc, 0x0160015f},
1676 {0x0000b0d0, 0x017e017f},
1677 {0x0000b0d4, 0x02410242},
1678 {0x0000b0d8, 0x025f0240},
1679 {0x0000b0dc, 0x027f0260},
1680 {0x0000b0e0, 0x0341027e},
1681 {0x0000b0e4, 0x035f0340},
1682 {0x0000b0e8, 0x037f0360},
1683 {0x0000b0ec, 0x04400441},
1684 {0x0000b0f0, 0x0460045f},
1685 {0x0000b0f4, 0x0541047f},
1686 {0x0000b0f8, 0x055f0540},
1687 {0x0000b0fc, 0x057f0560},
1688 {0x0000b100, 0x06400641},
1689 {0x0000b104, 0x0660065f},
1690 {0x0000b108, 0x067e067f},
1691 {0x0000b10c, 0x07410742},
1692 {0x0000b110, 0x075f0740},
1693 {0x0000b114, 0x077f0760},
1694 {0x0000b118, 0x07800781},
1695 {0x0000b11c, 0x07a0079f},
1696 {0x0000b120, 0x07c107bf},
1697 {0x0000b124, 0x000007c0},
1698 {0x0000b128, 0x00000000},
1699 {0x0000b12c, 0x00000000},
1700 {0x0000b130, 0x00000000},
1701 {0x0000b134, 0x00000000},
1702 {0x0000b138, 0x00000000},
1703 {0x0000b13c, 0x00000000},
1704 {0x0000b140, 0x003f0020},
1705 {0x0000b144, 0x00400041},
1706 {0x0000b148, 0x0140005f},
1707 {0x0000b14c, 0x0160015f},
1708 {0x0000b150, 0x017e017f},
1709 {0x0000b154, 0x02410242},
1710 {0x0000b158, 0x025f0240},
1711 {0x0000b15c, 0x027f0260},
1712 {0x0000b160, 0x0341027e},
1713 {0x0000b164, 0x035f0340},
1714 {0x0000b168, 0x037f0360},
1715 {0x0000b16c, 0x04400441},
1716 {0x0000b170, 0x0460045f},
1717 {0x0000b174, 0x0541047f},
1718 {0x0000b178, 0x055f0540},
1719 {0x0000b17c, 0x057f0560},
1720 {0x0000b180, 0x06400641},
1721 {0x0000b184, 0x0660065f},
1722 {0x0000b188, 0x067e067f},
1723 {0x0000b18c, 0x07410742},
1724 {0x0000b190, 0x075f0740},
1725 {0x0000b194, 0x077f0760},
1726 {0x0000b198, 0x07800781},
1727 {0x0000b19c, 0x07a0079f},
1728 {0x0000b1a0, 0x07c107bf},
1729 {0x0000b1a4, 0x000007c0},
1730 {0x0000b1a8, 0x00000000},
1731 {0x0000b1ac, 0x00000000},
1732 {0x0000b1b0, 0x00000000},
1733 {0x0000b1b4, 0x00000000},
1734 {0x0000b1b8, 0x00000000},
1735 {0x0000b1bc, 0x00000000},
1736 {0x0000b1c0, 0x00000000},
1737 {0x0000b1c4, 0x00000000},
1738 {0x0000b1c8, 0x00000000},
1739 {0x0000b1cc, 0x00000000},
1740 {0x0000b1d0, 0x00000000},
1741 {0x0000b1d4, 0x00000000},
1742 {0x0000b1d8, 0x00000000},
1743 {0x0000b1dc, 0x00000000},
1744 {0x0000b1e0, 0x00000000},
1745 {0x0000b1e4, 0x00000000},
1746 {0x0000b1e8, 0x00000000},
1747 {0x0000b1ec, 0x00000000},
1748 {0x0000b1f0, 0x00000396},
1749 {0x0000b1f4, 0x00000396},
1750 {0x0000b1f8, 0x00000396},
1751 {0x0000b1fc, 0x00000196},
1752};
1753
1754static const u32 ar9300_2p0_soc_preamble[][2] = {
1755 /* Addr allmodes */
1756 {0x000040a4, 0x00a0c1c9},
1757 {0x00007008, 0x00000000},
1758 {0x00007020, 0x00000000},
1759 {0x00007034, 0x00000002},
1760 {0x00007038, 0x000004c2},
1761};
1762
1763static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
1764 /* Addr allmodes */
1765 {0x00004040, 0x08212e5e},
1766 {0x00004040, 0x0008003b},
1767 {0x00004044, 0x00000000},
1768};
1769
1770static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
1771 /* Addr allmodes */
1772 {0x00004040, 0x08253e5e},
1773 {0x00004040, 0x0008003b},
1774 {0x00004044, 0x00000000},
1775};
1776
1777static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
1778 /* Addr allmodes */
1779 {0x00004040, 0x08213e5e},
1780 {0x00004040, 0x0008003b},
1781 {0x00004044, 0x00000000},
1782};
1783
1784#endif /* INITVALS_9003_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 4674ea8c9c99..9e6edffe0bd1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,11 @@
18#include "hw-ops.h" 18#include "hw-ops.h"
19#include "ar9003_phy.h" 19#include "ar9003_phy.h"
20 20
21enum ar9003_cal_types {
22 IQ_MISMATCH_CAL = BIT(0),
23 TEMP_COMP_CAL = BIT(1),
24};
25
21static void ar9003_hw_setup_calibration(struct ath_hw *ah, 26static void ar9003_hw_setup_calibration(struct ath_hw *ah,
22 struct ath9k_cal_list *currCal) 27 struct ath9k_cal_list *currCal)
23{ 28{
@@ -50,11 +55,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
50 ath_print(common, ATH_DBG_CALIBRATE, 55 ath_print(common, ATH_DBG_CALIBRATE,
51 "starting Temperature Compensation Calibration\n"); 56 "starting Temperature Compensation Calibration\n");
52 break; 57 break;
53 case ADC_DC_INIT_CAL:
54 case ADC_GAIN_CAL:
55 case ADC_DC_CAL:
56 /* Not yet */
57 break;
58 } 58 }
59} 59}
60 60
@@ -314,27 +314,6 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
314static void ar9003_hw_init_cal_settings(struct ath_hw *ah) 314static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
315{ 315{
316 ah->iq_caldata.calData = &iq_cal_single_sample; 316 ah->iq_caldata.calData = &iq_cal_single_sample;
317 ah->supp_cals = IQ_MISMATCH_CAL;
318}
319
320static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
321 enum ath9k_cal_types calType)
322{
323 switch (calType & ah->supp_cals) {
324 case IQ_MISMATCH_CAL:
325 /*
326 * XXX: Run IQ Mismatch for non-CCK only
327 * Note that CHANNEL_B is never set though.
328 */
329 return true;
330 case ADC_GAIN_CAL:
331 case ADC_DC_CAL:
332 return false;
333 case TEMP_COMP_CAL:
334 return true;
335 }
336
337 return false;
338} 317}
339 318
340/* 319/*
@@ -773,15 +752,16 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
773 752
774 /* Initialize list pointers */ 753 /* Initialize list pointers */
775 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; 754 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
755 ah->supp_cals = IQ_MISMATCH_CAL;
776 756
777 if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) { 757 if (ah->supp_cals & IQ_MISMATCH_CAL) {
778 INIT_CAL(&ah->iq_caldata); 758 INIT_CAL(&ah->iq_caldata);
779 INSERT_CAL(ah, &ah->iq_caldata); 759 INSERT_CAL(ah, &ah->iq_caldata);
780 ath_print(common, ATH_DBG_CALIBRATE, 760 ath_print(common, ATH_DBG_CALIBRATE,
781 "enabling IQ Calibration.\n"); 761 "enabling IQ Calibration.\n");
782 } 762 }
783 763
784 if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) { 764 if (ah->supp_cals & TEMP_COMP_CAL) {
785 INIT_CAL(&ah->tempCompCalData); 765 INIT_CAL(&ah->tempCompCalData);
786 INSERT_CAL(ah, &ah->tempCompCalData); 766 INSERT_CAL(ah, &ah->tempCompCalData);
787 ath_print(common, ATH_DBG_CALIBRATE, 767 ath_print(common, ATH_DBG_CALIBRATE,
@@ -808,7 +788,6 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
808 priv_ops->init_cal_settings = ar9003_hw_init_cal_settings; 788 priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
809 priv_ops->init_cal = ar9003_hw_init_cal; 789 priv_ops->init_cal = ar9003_hw_init_cal;
810 priv_ops->setup_calibration = ar9003_hw_setup_calibration; 790 priv_ops->setup_calibration = ar9003_hw_setup_calibration;
811 priv_ops->iscal_supported = ar9003_hw_iscal_supported;
812 791
813 ops->calibrate = ar9003_hw_calibrate; 792 ops->calibrate = ar9003_hw_calibrate;
814} 793}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 057fb69ddf7f..c4182359bee4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -968,7 +968,7 @@ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
968} 968}
969 969
970static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah, 970static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
971 enum ieee80211_band freq_band) 971 enum ath9k_hal_freq_band freq_band)
972{ 972{
973 return 1; 973 return 1;
974} 974}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 064168909108..c2a057156bfa 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -16,7 +16,6 @@
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9003_mac.h" 18#include "ar9003_mac.h"
19#include "ar9003_2p0_initvals.h"
20#include "ar9003_2p2_initvals.h" 19#include "ar9003_2p2_initvals.h"
21 20
22/* General hardware code for the AR9003 hadware family */ 21/* General hardware code for the AR9003 hadware family */
@@ -32,79 +31,12 @@ static bool ar9003_hw_macversion_supported(u32 macversion)
32 return false; 31 return false;
33} 32}
34 33
35/* AR9003 2.0 */ 34/*
36static void ar9003_2p0_hw_init_mode_regs(struct ath_hw *ah) 35 * The AR9003 family uses a new INI format (pre, core, post
37{ 36 * arrays per subsystem). This provides support for the
38 /* mac */ 37 * AR9003 2.2 chipsets.
39 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 38 */
40 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 39static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
41 ar9300_2p0_mac_core,
42 ARRAY_SIZE(ar9300_2p0_mac_core), 2);
43 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
44 ar9300_2p0_mac_postamble,
45 ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);
46
47 /* bb */
48 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
49 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
50 ar9300_2p0_baseband_core,
51 ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
52 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
53 ar9300_2p0_baseband_postamble,
54 ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);
55
56 /* radio */
57 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
58 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
59 ar9300_2p0_radio_core,
60 ARRAY_SIZE(ar9300_2p0_radio_core), 2);
61 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
62 ar9300_2p0_radio_postamble,
63 ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);
64
65 /* soc */
66 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
67 ar9300_2p0_soc_preamble,
68 ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
69 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
70 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
71 ar9300_2p0_soc_postamble,
72 ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);
73
74 /* rx/tx gain */
75 INIT_INI_ARRAY(&ah->iniModesRxGain,
76 ar9300Common_rx_gain_table_2p0,
77 ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
78 INIT_INI_ARRAY(&ah->iniModesTxGain,
79 ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
80 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
81 5);
82
83 /* Load PCIE SERDES settings from INI */
84
85 /* Awake Setting */
86
87 INIT_INI_ARRAY(&ah->iniPcieSerdes,
88 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
89 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
90 2);
91
92 /* Sleep Setting */
93
94 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
95 ar9300PciePhy_clkreq_enable_L1_2p0,
96 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
97 2);
98
99 /* Fast clock modal settings */
100 INIT_INI_ARRAY(&ah->iniModesAdditional,
101 ar9300Modes_fast_clock_2p0,
102 ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
103 3);
104}
105
106/* AR9003 2.2 */
107static void ar9003_2p2_hw_init_mode_regs(struct ath_hw *ah)
108{ 40{
109 /* mac */ 41 /* mac */
110 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 42 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@@ -174,57 +106,27 @@ static void ar9003_2p2_hw_init_mode_regs(struct ath_hw *ah)
174 3); 106 3);
175} 107}
176 108
177/*
178 * The AR9003 family uses a new INI format (pre, core, post
179 * arrays per subsystem).
180 */
181static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
182{
183 if (AR_SREV_9300_20(ah))
184 ar9003_2p0_hw_init_mode_regs(ah);
185 else
186 ar9003_2p2_hw_init_mode_regs(ah);
187}
188
189static void ar9003_tx_gain_table_apply(struct ath_hw *ah) 109static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
190{ 110{
191 switch (ar9003_hw_get_tx_gain_idx(ah)) { 111 switch (ar9003_hw_get_tx_gain_idx(ah)) {
192 case 0: 112 case 0:
193 default: 113 default:
194 if (AR_SREV_9300_20(ah)) 114 INIT_INI_ARRAY(&ah->iniModesTxGain,
195 INIT_INI_ARRAY(&ah->iniModesTxGain, 115 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
196 ar9300Modes_lowest_ob_db_tx_gain_table_2p0, 116 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
197 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0), 117 5);
198 5);
199 else
200 INIT_INI_ARRAY(&ah->iniModesTxGain,
201 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
202 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
203 5);
204 break; 118 break;
205 case 1: 119 case 1:
206 if (AR_SREV_9300_20(ah)) 120 INIT_INI_ARRAY(&ah->iniModesTxGain,
207 INIT_INI_ARRAY(&ah->iniModesTxGain, 121 ar9300Modes_high_ob_db_tx_gain_table_2p2,
208 ar9300Modes_high_ob_db_tx_gain_table_2p0, 122 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
209 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0), 123 5);
210 5);
211 else
212 INIT_INI_ARRAY(&ah->iniModesTxGain,
213 ar9300Modes_high_ob_db_tx_gain_table_2p2,
214 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
215 5);
216 break; 124 break;
217 case 2: 125 case 2:
218 if (AR_SREV_9300_20(ah)) 126 INIT_INI_ARRAY(&ah->iniModesTxGain,
219 INIT_INI_ARRAY(&ah->iniModesTxGain, 127 ar9300Modes_low_ob_db_tx_gain_table_2p2,
220 ar9300Modes_low_ob_db_tx_gain_table_2p0, 128 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
221 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0), 129 5);
222 5);
223 else
224 INIT_INI_ARRAY(&ah->iniModesTxGain,
225 ar9300Modes_low_ob_db_tx_gain_table_2p2,
226 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
227 5);
228 break; 130 break;
229 } 131 }
230} 132}
@@ -234,28 +136,16 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
234 switch (ar9003_hw_get_rx_gain_idx(ah)) { 136 switch (ar9003_hw_get_rx_gain_idx(ah)) {
235 case 0: 137 case 0:
236 default: 138 default:
237 if (AR_SREV_9300_20(ah)) 139 INIT_INI_ARRAY(&ah->iniModesRxGain,
238 INIT_INI_ARRAY(&ah->iniModesRxGain, 140 ar9300Common_rx_gain_table_2p2,
239 ar9300Common_rx_gain_table_2p0, 141 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
240 ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 142 2);
241 2);
242 else
243 INIT_INI_ARRAY(&ah->iniModesRxGain,
244 ar9300Common_rx_gain_table_2p2,
245 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
246 2);
247 break; 143 break;
248 case 1: 144 case 1:
249 if (AR_SREV_9300_20(ah)) 145 INIT_INI_ARRAY(&ah->iniModesRxGain,
250 INIT_INI_ARRAY(&ah->iniModesRxGain, 146 ar9300Common_wo_xlna_rx_gain_table_2p2,
251 ar9300Common_wo_xlna_rx_gain_table_2p0, 147 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
252 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0), 148 2);
253 2);
254 else
255 INIT_INI_ARRAY(&ah->iniModesRxGain,
256 ar9300Common_wo_xlna_rx_gain_table_2p2,
257 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
258 2);
259 break; 149 break;
260 } 150 }
261} 151}
@@ -333,6 +223,4 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
333 ar9003_hw_attach_phy_ops(ah); 223 ar9003_hw_attach_phy_ops(ah);
334 ar9003_hw_attach_calib_ops(ah); 224 ar9003_hw_attach_calib_ops(ah);
335 ar9003_hw_attach_mac_ops(ah); 225 ar9003_hw_attach_mac_ops(ah);
336
337 ath9k_hw_attach_ani_ops_new(ah);
338} 226}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index a462da23e87e..3b424ca1ba84 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -616,7 +616,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
616 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 616 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
617 } else if (rxsp->status11 & AR_MichaelErr) { 617 } else if (rxsp->status11 & AR_MichaelErr) {
618 rxs->rs_status |= ATH9K_RXERR_MIC; 618 rxs->rs_status |= ATH9K_RXERR_MIC;
619 } 619 } else if (rxsp->status11 & AR_KeyMiss)
620 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
620 } 621 }
621 622
622 return 0; 623 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index a491854fa38a..669b777729b3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -747,9 +747,9 @@ static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
747static bool ar9003_hw_ani_control(struct ath_hw *ah, 747static bool ar9003_hw_ani_control(struct ath_hw *ah,
748 enum ath9k_ani_cmd cmd, int param) 748 enum ath9k_ani_cmd cmd, int param)
749{ 749{
750 struct ar5416AniState *aniState = ah->curani;
751 struct ath_common *common = ath9k_hw_common(ah); 750 struct ath_common *common = ath9k_hw_common(ah);
752 struct ath9k_channel *chan = ah->curchan; 751 struct ath9k_channel *chan = ah->curchan;
752 struct ar5416AniState *aniState = &chan->ani;
753 s32 value, value2; 753 s32 value, value2;
754 754
755 switch (cmd & ah->ani_function) { 755 switch (cmd & ah->ani_function) {
@@ -1005,15 +1005,13 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
1005 1005
1006 ath_print(common, ATH_DBG_ANI, 1006 ath_print(common, ATH_DBG_ANI,
1007 "ANI parameters: SI=%d, ofdmWS=%s FS=%d " 1007 "ANI parameters: SI=%d, ofdmWS=%s FS=%d "
1008 "MRCcck=%s listenTime=%d CC=%d listen=%d " 1008 "MRCcck=%s listenTime=%d "
1009 "ofdmErrs=%d cckErrs=%d\n", 1009 "ofdmErrs=%d cckErrs=%d\n",
1010 aniState->spurImmunityLevel, 1010 aniState->spurImmunityLevel,
1011 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1011 !aniState->ofdmWeakSigDetectOff ? "on" : "off",
1012 aniState->firstepLevel, 1012 aniState->firstepLevel,
1013 !aniState->mrcCCKOff ? "on" : "off", 1013 !aniState->mrcCCKOff ? "on" : "off",
1014 aniState->listenTime, 1014 aniState->listenTime,
1015 aniState->cycleCount,
1016 aniState->listenTime,
1017 aniState->ofdmPhyErrCount, 1015 aniState->ofdmPhyErrCount,
1018 aniState->cckPhyErrCount); 1016 aniState->cckPhyErrCount);
1019 return true; 1017 return true;
@@ -1067,12 +1065,9 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1067 struct ath_common *common = ath9k_hw_common(ah); 1065 struct ath_common *common = ath9k_hw_common(ah);
1068 struct ath9k_channel *chan = ah->curchan; 1066 struct ath9k_channel *chan = ah->curchan;
1069 struct ath9k_ani_default *iniDef; 1067 struct ath9k_ani_default *iniDef;
1070 int index;
1071 u32 val; 1068 u32 val;
1072 1069
1073 index = ath9k_hw_get_ani_channel_idx(ah, chan); 1070 aniState = &ah->curchan->ani;
1074 aniState = &ah->ani[index];
1075 ah->curani = aniState;
1076 iniDef = &aniState->iniDef; 1071 iniDef = &aniState->iniDef;
1077 1072
1078 ath_print(common, ATH_DBG_ANI, 1073 ath_print(common, ATH_DBG_ANI,
@@ -1116,8 +1111,6 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1116 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW; 1111 aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
1117 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG; 1112 aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
1118 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK; 1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
1119
1120 aniState->cycleCount = 0;
1121} 1114}
1122 1115
1123void ar9003_hw_attach_phy_ops(struct ath_hw *ah) 1116void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
@@ -1232,7 +1225,7 @@ void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
1232void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah) 1225void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
1233{ 1226{
1234 struct ath_common *common = ath9k_hw_common(ah); 1227 struct ath_common *common = ath9k_hw_common(ah);
1235 u32 rxc_pcnt = 0, rxf_pcnt = 0, txf_pcnt = 0, status; 1228 u32 status;
1236 1229
1237 if (likely(!(common->debug_mask & ATH_DBG_RESET))) 1230 if (likely(!(common->debug_mask & ATH_DBG_RESET)))
1238 return; 1231 return;
@@ -1261,11 +1254,12 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
1261 "** BB mode: BB_gen_controls=0x%08x **\n", 1254 "** BB mode: BB_gen_controls=0x%08x **\n",
1262 REG_READ(ah, AR_PHY_GEN_CTRL)); 1255 REG_READ(ah, AR_PHY_GEN_CTRL));
1263 1256
1264 if (ath9k_hw_GetMibCycleCountsPct(ah, &rxc_pcnt, &rxf_pcnt, &txf_pcnt)) 1257#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
1258 if (common->cc_survey.cycles)
1265 ath_print(common, ATH_DBG_RESET, 1259 ath_print(common, ATH_DBG_RESET,
1266 "** BB busy times: rx_clear=%d%%, " 1260 "** BB busy times: rx_clear=%d%%, "
1267 "rx_frame=%d%%, tx_frame=%d%% **\n", 1261 "rx_frame=%d%%, tx_frame=%d%% **\n",
1268 rxc_pcnt, rxf_pcnt, txf_pcnt); 1262 PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
1269 1263
1270 ath_print(common, ATH_DBG_RESET, 1264 ath_print(common, ATH_DBG_RESET,
1271 "==== BB update: done ====\n\n"); 1265 "==== BB update: done ====\n\n");
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index f0197a6046ab..973c919fdd27 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -239,13 +239,11 @@ struct ath_buf {
239 struct sk_buff *bf_mpdu; /* enclosing frame structure */ 239 struct sk_buff *bf_mpdu; /* enclosing frame structure */
240 void *bf_desc; /* virtual addr of desc */ 240 void *bf_desc; /* virtual addr of desc */
241 dma_addr_t bf_daddr; /* physical addr of desc */ 241 dma_addr_t bf_daddr; /* physical addr of desc */
242 dma_addr_t bf_buf_addr; /* physical addr of data buffer */ 242 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
243 bool bf_stale; 243 bool bf_stale;
244 bool bf_isnullfunc;
245 bool bf_tx_aborted; 244 bool bf_tx_aborted;
246 u16 bf_flags; 245 u16 bf_flags;
247 struct ath_buf_state bf_state; 246 struct ath_buf_state bf_state;
248 dma_addr_t bf_dmacontext;
249 struct ath_wiphy *aphy; 247 struct ath_wiphy *aphy;
250}; 248};
251 249
@@ -254,7 +252,7 @@ struct ath_atx_tid {
254 struct list_head buf_q; 252 struct list_head buf_q;
255 struct ath_node *an; 253 struct ath_node *an;
256 struct ath_atx_ac *ac; 254 struct ath_atx_ac *ac;
257 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; 255 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
258 u16 seq_start; 256 u16 seq_start;
259 u16 seq_next; 257 u16 seq_next;
260 u16 baw_size; 258 u16 baw_size;
@@ -345,12 +343,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
345void ath_tx_tasklet(struct ath_softc *sc); 343void ath_tx_tasklet(struct ath_softc *sc);
346void ath_tx_edma_tasklet(struct ath_softc *sc); 344void ath_tx_edma_tasklet(struct ath_softc *sc);
347void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb); 345void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
348bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno); 346int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
349void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 347 u16 tid, u16 *ssn);
350 u16 tid, u16 *ssn);
351void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 348void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
352void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 349void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
353void ath9k_enable_ps(struct ath_softc *sc);
354 350
355/********/ 351/********/
356/* VIFs */ 352/* VIFs */
@@ -481,6 +477,60 @@ struct ath_led {
481void ath_init_leds(struct ath_softc *sc); 477void ath_init_leds(struct ath_softc *sc);
482void ath_deinit_leds(struct ath_softc *sc); 478void ath_deinit_leds(struct ath_softc *sc);
483 479
480/* Antenna diversity/combining */
481#define ATH_ANT_RX_CURRENT_SHIFT 4
482#define ATH_ANT_RX_MAIN_SHIFT 2
483#define ATH_ANT_RX_MASK 0x3
484
485#define ATH_ANT_DIV_COMB_SHORT_SCAN_INTR 50
486#define ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT 0x100
487#define ATH_ANT_DIV_COMB_MAX_PKTCOUNT 0x200
488#define ATH_ANT_DIV_COMB_INIT_COUNT 95
489#define ATH_ANT_DIV_COMB_MAX_COUNT 100
490#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
491#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
492
493#define ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA -3
494#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
495#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
496#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
497#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
498
499enum ath9k_ant_div_comb_lna_conf {
500 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
501 ATH_ANT_DIV_COMB_LNA2,
502 ATH_ANT_DIV_COMB_LNA1,
503 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
504};
505
506struct ath_ant_comb {
507 u16 count;
508 u16 total_pkt_count;
509 bool scan;
510 bool scan_not_start;
511 int main_total_rssi;
512 int alt_total_rssi;
513 int alt_recv_cnt;
514 int main_recv_cnt;
515 int rssi_lna1;
516 int rssi_lna2;
517 int rssi_add;
518 int rssi_sub;
519 int rssi_first;
520 int rssi_second;
521 int rssi_third;
522 bool alt_good;
523 int quick_scan_cnt;
524 int main_conf;
525 enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
526 enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
527 int first_bias;
528 int second_bias;
529 bool first_ratio;
530 bool second_ratio;
531 unsigned long scan_start_time;
532};
533
484/********************/ 534/********************/
485/* Main driver core */ 535/* Main driver core */
486/********************/ 536/********************/
@@ -509,7 +559,6 @@ void ath_deinit_leds(struct ath_softc *sc);
509#define SC_OP_RXFLUSH BIT(7) 559#define SC_OP_RXFLUSH BIT(7)
510#define SC_OP_LED_ASSOCIATED BIT(8) 560#define SC_OP_LED_ASSOCIATED BIT(8)
511#define SC_OP_LED_ON BIT(9) 561#define SC_OP_LED_ON BIT(9)
512#define SC_OP_SCANNING BIT(10)
513#define SC_OP_TSF_RESET BIT(11) 562#define SC_OP_TSF_RESET BIT(11)
514#define SC_OP_BT_PRIORITY_DETECTED BIT(12) 563#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
515#define SC_OP_BT_SCAN BIT(13) 564#define SC_OP_BT_SCAN BIT(13)
@@ -521,8 +570,6 @@ void ath_deinit_leds(struct ath_softc *sc);
521#define PS_WAIT_FOR_PSPOLL_DATA BIT(2) 570#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
522#define PS_WAIT_FOR_TX_ACK BIT(3) 571#define PS_WAIT_FOR_TX_ACK BIT(3)
523#define PS_BEACON_SYNC BIT(4) 572#define PS_BEACON_SYNC BIT(4)
524#define PS_NULLFUNC_COMPLETED BIT(5)
525#define PS_ENABLED BIT(6)
526 573
527struct ath_wiphy; 574struct ath_wiphy;
528struct ath_rate_table; 575struct ath_rate_table;
@@ -545,6 +592,8 @@ struct ath_softc {
545 struct delayed_work wiphy_work; 592 struct delayed_work wiphy_work;
546 unsigned long wiphy_scheduler_int; 593 unsigned long wiphy_scheduler_int;
547 int wiphy_scheduler_index; 594 int wiphy_scheduler_index;
595 struct survey_info *cur_survey;
596 struct survey_info survey[ATH9K_NUM_CHANNELS];
548 597
549 struct tasklet_struct intr_tq; 598 struct tasklet_struct intr_tq;
550 struct tasklet_struct bcon_tasklet; 599 struct tasklet_struct bcon_tasklet;
@@ -573,8 +622,6 @@ struct ath_softc {
573 struct ath_rx rx; 622 struct ath_rx rx;
574 struct ath_tx tx; 623 struct ath_tx tx;
575 struct ath_beacon beacon; 624 struct ath_beacon beacon;
576 const struct ath_rate_table *cur_rate_table;
577 enum wireless_mode cur_rate_mode;
578 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 625 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
579 626
580 struct ath_led radio_led; 627 struct ath_led radio_led;
@@ -597,6 +644,8 @@ struct ath_softc {
597 struct ath_btcoex btcoex; 644 struct ath_btcoex btcoex;
598 645
599 struct ath_descdma txsdma; 646 struct ath_descdma txsdma;
647
648 struct ath_ant_comb ant_comb;
600}; 649};
601 650
602struct ath_wiphy { 651struct ath_wiphy {
@@ -663,7 +712,7 @@ static inline void ath_ahb_exit(void) {};
663void ath9k_ps_wakeup(struct ath_softc *sc); 712void ath9k_ps_wakeup(struct ath_softc *sc);
664void ath9k_ps_restore(struct ath_softc *sc); 713void ath9k_ps_restore(struct ath_softc *sc);
665 714
666void ath9k_set_bssid_mask(struct ieee80211_hw *hw); 715void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
667int ath9k_wiphy_add(struct ath_softc *sc); 716int ath9k_wiphy_add(struct ath_softc *sc);
668int ath9k_wiphy_del(struct ath_wiphy *aphy); 717int ath9k_wiphy_del(struct ath_wiphy *aphy);
669void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); 718void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 081192e78a46..4ed010d4ef96 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -136,9 +136,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
136 bf = avp->av_bcbuf; 136 bf = avp->av_bcbuf;
137 skb = bf->bf_mpdu; 137 skb = bf->bf_mpdu;
138 if (skb) { 138 if (skb) {
139 dma_unmap_single(sc->dev, bf->bf_dmacontext, 139 dma_unmap_single(sc->dev, bf->bf_buf_addr,
140 skb->len, DMA_TO_DEVICE); 140 skb->len, DMA_TO_DEVICE);
141 dev_kfree_skb_any(skb); 141 dev_kfree_skb_any(skb);
142 bf->bf_buf_addr = 0;
142 } 143 }
143 144
144 /* Get a new beacon from mac80211 */ 145 /* Get a new beacon from mac80211 */
@@ -162,12 +163,12 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
162 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); 163 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
163 } 164 }
164 165
165 bf->bf_buf_addr = bf->bf_dmacontext = 166 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
166 dma_map_single(sc->dev, skb->data, 167 skb->len, DMA_TO_DEVICE);
167 skb->len, DMA_TO_DEVICE);
168 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 168 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
169 dev_kfree_skb_any(skb); 169 dev_kfree_skb_any(skb);
170 bf->bf_mpdu = NULL; 170 bf->bf_mpdu = NULL;
171 bf->bf_buf_addr = 0;
171 ath_print(common, ATH_DBG_FATAL, 172 ath_print(common, ATH_DBG_FATAL,
172 "dma_mapping_error on beaconing\n"); 173 "dma_mapping_error on beaconing\n");
173 return NULL; 174 return NULL;
@@ -252,10 +253,11 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
252 bf = avp->av_bcbuf; 253 bf = avp->av_bcbuf;
253 if (bf->bf_mpdu != NULL) { 254 if (bf->bf_mpdu != NULL) {
254 skb = bf->bf_mpdu; 255 skb = bf->bf_mpdu;
255 dma_unmap_single(sc->dev, bf->bf_dmacontext, 256 dma_unmap_single(sc->dev, bf->bf_buf_addr,
256 skb->len, DMA_TO_DEVICE); 257 skb->len, DMA_TO_DEVICE);
257 dev_kfree_skb_any(skb); 258 dev_kfree_skb_any(skb);
258 bf->bf_mpdu = NULL; 259 bf->bf_mpdu = NULL;
260 bf->bf_buf_addr = 0;
259 } 261 }
260 262
261 /* NB: the beacon data buffer must be 32-bit aligned. */ 263 /* NB: the beacon data buffer must be 32-bit aligned. */
@@ -296,12 +298,12 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
296 avp->tsf_adjust = cpu_to_le64(0); 298 avp->tsf_adjust = cpu_to_le64(0);
297 299
298 bf->bf_mpdu = skb; 300 bf->bf_mpdu = skb;
299 bf->bf_buf_addr = bf->bf_dmacontext = 301 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
300 dma_map_single(sc->dev, skb->data, 302 skb->len, DMA_TO_DEVICE);
301 skb->len, DMA_TO_DEVICE);
302 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 303 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
303 dev_kfree_skb_any(skb); 304 dev_kfree_skb_any(skb);
304 bf->bf_mpdu = NULL; 305 bf->bf_mpdu = NULL;
306 bf->bf_buf_addr = 0;
305 ath_print(common, ATH_DBG_FATAL, 307 ath_print(common, ATH_DBG_FATAL,
306 "dma_mapping_error on beacon alloc\n"); 308 "dma_mapping_error on beacon alloc\n");
307 return -ENOMEM; 309 return -ENOMEM;
@@ -324,10 +326,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
324 bf = avp->av_bcbuf; 326 bf = avp->av_bcbuf;
325 if (bf->bf_mpdu != NULL) { 327 if (bf->bf_mpdu != NULL) {
326 struct sk_buff *skb = bf->bf_mpdu; 328 struct sk_buff *skb = bf->bf_mpdu;
327 dma_unmap_single(sc->dev, bf->bf_dmacontext, 329 dma_unmap_single(sc->dev, bf->bf_buf_addr,
328 skb->len, DMA_TO_DEVICE); 330 skb->len, DMA_TO_DEVICE);
329 dev_kfree_skb_any(skb); 331 dev_kfree_skb_any(skb);
330 bf->bf_mpdu = NULL; 332 bf->bf_mpdu = NULL;
333 bf->bf_buf_addr = 0;
331 } 334 }
332 list_add_tail(&bf->list, &sc->beacon.bbuf); 335 list_add_tail(&bf->list, &sc->beacon.bbuf);
333 336
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 67ee5d735cc1..6d509484b5f6 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -186,7 +186,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
186 return true; 186 return true;
187 } 187 }
188 188
189 if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType)) 189 if (!(ah->supp_cals & currCal->calData->calType))
190 return true; 190 return true;
191 191
192 ath_print(common, ATH_DBG_CALIBRATE, 192 ath_print(common, ATH_DBG_CALIBRATE,
@@ -300,7 +300,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
300 } 300 }
301 } 301 }
302 REGWRITE_BUFFER_FLUSH(ah); 302 REGWRITE_BUFFER_FLUSH(ah);
303 DISABLE_REGWRITE_BUFFER(ah);
304} 303}
305 304
306 305
@@ -346,34 +345,34 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
346 struct ieee80211_channel *c = chan->chan; 345 struct ieee80211_channel *c = chan->chan;
347 struct ath9k_hw_cal_data *caldata = ah->caldata; 346 struct ath9k_hw_cal_data *caldata = ah->caldata;
348 347
349 if (!caldata)
350 return false;
351
352 chan->channelFlags &= (~CHANNEL_CW_INT); 348 chan->channelFlags &= (~CHANNEL_CW_INT);
353 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 349 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
354 ath_print(common, ATH_DBG_CALIBRATE, 350 ath_print(common, ATH_DBG_CALIBRATE,
355 "NF did not complete in calibration window\n"); 351 "NF did not complete in calibration window\n");
356 nf = 0;
357 caldata->rawNoiseFloor = nf;
358 return false; 352 return false;
359 } else { 353 }
360 ath9k_hw_do_getnf(ah, nfarray); 354
361 ath9k_hw_nf_sanitize(ah, nfarray); 355 ath9k_hw_do_getnf(ah, nfarray);
362 nf = nfarray[0]; 356 ath9k_hw_nf_sanitize(ah, nfarray);
363 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) 357 nf = nfarray[0];
364 && nf > nfThresh) { 358 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
365 ath_print(common, ATH_DBG_CALIBRATE, 359 && nf > nfThresh) {
366 "noise floor failed detected; " 360 ath_print(common, ATH_DBG_CALIBRATE,
367 "detected %d, threshold %d\n", 361 "noise floor failed detected; "
368 nf, nfThresh); 362 "detected %d, threshold %d\n",
369 chan->channelFlags |= CHANNEL_CW_INT; 363 nf, nfThresh);
370 } 364 chan->channelFlags |= CHANNEL_CW_INT;
365 }
366
367 if (!caldata) {
368 chan->noisefloor = nf;
369 return false;
371 } 370 }
372 371
373 h = caldata->nfCalHist; 372 h = caldata->nfCalHist;
374 caldata->nfcal_pending = false; 373 caldata->nfcal_pending = false;
375 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray); 374 ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
376 caldata->rawNoiseFloor = h[0].privNF; 375 chan->noisefloor = h[0].privNF;
377 return true; 376 return true;
378} 377}
379 378
@@ -401,10 +400,10 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
401 400
402s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan) 401s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
403{ 402{
404 if (!ah->caldata || !ah->caldata->rawNoiseFloor) 403 if (!ah->curchan || !ah->curchan->noisefloor)
405 return ath9k_hw_get_default_nf(ah, chan); 404 return ath9k_hw_get_default_nf(ah, chan);
406 405
407 return ah->caldata->rawNoiseFloor; 406 return ah->curchan->noisefloor;
408} 407}
409EXPORT_SYMBOL(ath9k_hw_getchan_noise); 408EXPORT_SYMBOL(ath9k_hw_getchan_noise);
410 409
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 5b053a6260b2..b8973eb8d858 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -58,14 +58,6 @@ struct ar5416IniArray {
58 } \ 58 } \
59 } while (0) 59 } while (0)
60 60
61enum ath9k_cal_types {
62 ADC_DC_INIT_CAL = 0x1,
63 ADC_GAIN_CAL = 0x2,
64 ADC_DC_CAL = 0x4,
65 IQ_MISMATCH_CAL = 0x8,
66 TEMP_COMP_CAL = 0x10,
67};
68
69enum ath9k_cal_state { 61enum ath9k_cal_state {
70 CAL_INACTIVE, 62 CAL_INACTIVE,
71 CAL_WAITING, 63 CAL_WAITING,
@@ -80,7 +72,7 @@ enum ath9k_cal_state {
80#define PER_MAX_LOG_COUNT 10 72#define PER_MAX_LOG_COUNT 10
81 73
82struct ath9k_percal_data { 74struct ath9k_percal_data {
83 enum ath9k_cal_types calType; 75 u32 calType;
84 u32 calNumSamples; 76 u32 calNumSamples;
85 u32 calCountMax; 77 u32 calCountMax;
86 void (*calCollect) (struct ath_hw *); 78 void (*calCollect) (struct ath_hw *);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 2dab64bb23a8..f43a2d98421c 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -148,276 +148,6 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
148} 148}
149EXPORT_SYMBOL(ath9k_cmn_get_curchannel); 149EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
150 150
151static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
152 struct ath9k_keyval *hk, const u8 *addr,
153 bool authenticator)
154{
155 struct ath_hw *ah = common->ah;
156 const u8 *key_rxmic;
157 const u8 *key_txmic;
158
159 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
160 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
161
162 if (addr == NULL) {
163 /*
164 * Group key installation - only two key cache entries are used
165 * regardless of splitmic capability since group key is only
166 * used either for TX or RX.
167 */
168 if (authenticator) {
169 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
170 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
171 } else {
172 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
173 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
174 }
175 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
176 }
177 if (!common->splitmic) {
178 /* TX and RX keys share the same key cache entry. */
179 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
180 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
181 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
182 }
183
184 /* Separate key cache entries for TX and RX */
185
186 /* TX key goes at first index, RX key at +32. */
187 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
188 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
189 /* TX MIC entry failed. No need to proceed further */
190 ath_print(common, ATH_DBG_FATAL,
191 "Setting TX MIC Key Failed\n");
192 return 0;
193 }
194
195 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
196 /* XXX delete tx key on failure? */
197 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
198}
199
200static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
201{
202 int i;
203
204 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
205 if (test_bit(i, common->keymap) ||
206 test_bit(i + 64, common->keymap))
207 continue; /* At least one part of TKIP key allocated */
208 if (common->splitmic &&
209 (test_bit(i + 32, common->keymap) ||
210 test_bit(i + 64 + 32, common->keymap)))
211 continue; /* At least one part of TKIP key allocated */
212
213 /* Found a free slot for a TKIP key */
214 return i;
215 }
216 return -1;
217}
218
219static int ath_reserve_key_cache_slot(struct ath_common *common,
220 u32 cipher)
221{
222 int i;
223
224 if (cipher == WLAN_CIPHER_SUITE_TKIP)
225 return ath_reserve_key_cache_slot_tkip(common);
226
227 /* First, try to find slots that would not be available for TKIP. */
228 if (common->splitmic) {
229 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
230 if (!test_bit(i, common->keymap) &&
231 (test_bit(i + 32, common->keymap) ||
232 test_bit(i + 64, common->keymap) ||
233 test_bit(i + 64 + 32, common->keymap)))
234 return i;
235 if (!test_bit(i + 32, common->keymap) &&
236 (test_bit(i, common->keymap) ||
237 test_bit(i + 64, common->keymap) ||
238 test_bit(i + 64 + 32, common->keymap)))
239 return i + 32;
240 if (!test_bit(i + 64, common->keymap) &&
241 (test_bit(i , common->keymap) ||
242 test_bit(i + 32, common->keymap) ||
243 test_bit(i + 64 + 32, common->keymap)))
244 return i + 64;
245 if (!test_bit(i + 64 + 32, common->keymap) &&
246 (test_bit(i, common->keymap) ||
247 test_bit(i + 32, common->keymap) ||
248 test_bit(i + 64, common->keymap)))
249 return i + 64 + 32;
250 }
251 } else {
252 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
253 if (!test_bit(i, common->keymap) &&
254 test_bit(i + 64, common->keymap))
255 return i;
256 if (test_bit(i, common->keymap) &&
257 !test_bit(i + 64, common->keymap))
258 return i + 64;
259 }
260 }
261
262 /* No partially used TKIP slots, pick any available slot */
263 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
264 /* Do not allow slots that could be needed for TKIP group keys
265 * to be used. This limitation could be removed if we know that
266 * TKIP will not be used. */
267 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
268 continue;
269 if (common->splitmic) {
270 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
271 continue;
272 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
273 continue;
274 }
275
276 if (!test_bit(i, common->keymap))
277 return i; /* Found a free slot for a key */
278 }
279
280 /* No free slot found */
281 return -1;
282}
283
284/*
285 * Configure encryption in the HW.
286 */
287int ath9k_cmn_key_config(struct ath_common *common,
288 struct ieee80211_vif *vif,
289 struct ieee80211_sta *sta,
290 struct ieee80211_key_conf *key)
291{
292 struct ath_hw *ah = common->ah;
293 struct ath9k_keyval hk;
294 const u8 *mac = NULL;
295 u8 gmac[ETH_ALEN];
296 int ret = 0;
297 int idx;
298
299 memset(&hk, 0, sizeof(hk));
300
301 switch (key->cipher) {
302 case WLAN_CIPHER_SUITE_WEP40:
303 case WLAN_CIPHER_SUITE_WEP104:
304 hk.kv_type = ATH9K_CIPHER_WEP;
305 break;
306 case WLAN_CIPHER_SUITE_TKIP:
307 hk.kv_type = ATH9K_CIPHER_TKIP;
308 break;
309 case WLAN_CIPHER_SUITE_CCMP:
310 hk.kv_type = ATH9K_CIPHER_AES_CCM;
311 break;
312 default:
313 return -EOPNOTSUPP;
314 }
315
316 hk.kv_len = key->keylen;
317 memcpy(hk.kv_val, key->key, key->keylen);
318
319 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
320 switch (vif->type) {
321 case NL80211_IFTYPE_AP:
322 memcpy(gmac, vif->addr, ETH_ALEN);
323 gmac[0] |= 0x01;
324 mac = gmac;
325 idx = ath_reserve_key_cache_slot(common, key->cipher);
326 break;
327 case NL80211_IFTYPE_ADHOC:
328 if (!sta) {
329 idx = key->keyidx;
330 break;
331 }
332 memcpy(gmac, sta->addr, ETH_ALEN);
333 gmac[0] |= 0x01;
334 mac = gmac;
335 idx = ath_reserve_key_cache_slot(common, key->cipher);
336 break;
337 default:
338 idx = key->keyidx;
339 break;
340 }
341 } else if (key->keyidx) {
342 if (WARN_ON(!sta))
343 return -EOPNOTSUPP;
344 mac = sta->addr;
345
346 if (vif->type != NL80211_IFTYPE_AP) {
347 /* Only keyidx 0 should be used with unicast key, but
348 * allow this for client mode for now. */
349 idx = key->keyidx;
350 } else
351 return -EIO;
352 } else {
353 if (WARN_ON(!sta))
354 return -EOPNOTSUPP;
355 mac = sta->addr;
356
357 idx = ath_reserve_key_cache_slot(common, key->cipher);
358 }
359
360 if (idx < 0)
361 return -ENOSPC; /* no free key cache entries */
362
363 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
364 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
365 vif->type == NL80211_IFTYPE_AP);
366 else
367 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
368
369 if (!ret)
370 return -EIO;
371
372 set_bit(idx, common->keymap);
373 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
374 set_bit(idx + 64, common->keymap);
375 set_bit(idx, common->tkip_keymap);
376 set_bit(idx + 64, common->tkip_keymap);
377 if (common->splitmic) {
378 set_bit(idx + 32, common->keymap);
379 set_bit(idx + 64 + 32, common->keymap);
380 set_bit(idx + 32, common->tkip_keymap);
381 set_bit(idx + 64 + 32, common->tkip_keymap);
382 }
383 }
384
385 return idx;
386}
387EXPORT_SYMBOL(ath9k_cmn_key_config);
388
389/*
390 * Delete Key.
391 */
392void ath9k_cmn_key_delete(struct ath_common *common,
393 struct ieee80211_key_conf *key)
394{
395 struct ath_hw *ah = common->ah;
396
397 ath9k_hw_keyreset(ah, key->hw_key_idx);
398 if (key->hw_key_idx < IEEE80211_WEP_NKID)
399 return;
400
401 clear_bit(key->hw_key_idx, common->keymap);
402 if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
403 return;
404
405 clear_bit(key->hw_key_idx + 64, common->keymap);
406
407 clear_bit(key->hw_key_idx, common->tkip_keymap);
408 clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
409
410 if (common->splitmic) {
411 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
412 clear_bit(key->hw_key_idx + 32, common->keymap);
413 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
414
415 clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
416 clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
417 }
418}
419EXPORT_SYMBOL(ath9k_cmn_key_delete);
420
421int ath9k_cmn_count_streams(unsigned int chainmask, int max) 151int ath9k_cmn_count_streams(unsigned int chainmask, int max)
422{ 152{
423 int streams = 0; 153 int streams = 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 4aa4e7dbe4d2..fea3b3315391 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -66,12 +66,6 @@ void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
66 struct ath9k_channel *ichan); 66 struct ath9k_channel *ichan);
67struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 67struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
68 struct ath_hw *ah); 68 struct ath_hw *ah);
69int ath9k_cmn_key_config(struct ath_common *common,
70 struct ieee80211_vif *vif,
71 struct ieee80211_sta *sta,
72 struct ieee80211_key_conf *key);
73void ath9k_cmn_key_delete(struct ath_common *common,
74 struct ieee80211_key_conf *key);
75int ath9k_cmn_count_streams(unsigned int chainmask, int max); 69int ath9k_cmn_count_streams(unsigned int chainmask, int max);
76void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, 70void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
77 enum ath_stomp_type stomp_type); 71 enum ath_stomp_type stomp_type);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 54aae931424e..7f764e3d1c0a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -378,95 +378,6 @@ static const struct file_operations fops_interrupt = {
378 .owner = THIS_MODULE 378 .owner = THIS_MODULE
379}; 379};
380 380
381void ath_debug_stat_rc(struct ath_softc *sc, int final_rate)
382{
383 struct ath_rc_stats *stats;
384
385 stats = &sc->debug.stats.rcstats[final_rate];
386 stats->success++;
387}
388
389void ath_debug_stat_retries(struct ath_softc *sc, int rix,
390 int xretries, int retries, u8 per)
391{
392 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[rix];
393
394 stats->xretries += xretries;
395 stats->retries += retries;
396 stats->per = per;
397}
398
399static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
400 size_t count, loff_t *ppos)
401{
402 struct ath_softc *sc = file->private_data;
403 char *buf;
404 unsigned int len = 0, max;
405 int i = 0;
406 ssize_t retval;
407
408 if (sc->cur_rate_table == NULL)
409 return 0;
410
411 max = 80 + sc->cur_rate_table->rate_cnt * 1024 + 1;
412 buf = kmalloc(max, GFP_KERNEL);
413 if (buf == NULL)
414 return -ENOMEM;
415
416 len += sprintf(buf, "%6s %6s %6s "
417 "%10s %10s %10s %10s\n",
418 "HT", "MCS", "Rate",
419 "Success", "Retries", "XRetries", "PER");
420
421 for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
422 u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
423 struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
424 char mcs[5];
425 char htmode[5];
426 int used_mcs = 0, used_htmode = 0;
427
428 if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
429 used_mcs = snprintf(mcs, 5, "%d",
430 sc->cur_rate_table->info[i].ratecode);
431
432 if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
433 used_htmode = snprintf(htmode, 5, "HT40");
434 else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
435 used_htmode = snprintf(htmode, 5, "HT20");
436 else
437 used_htmode = snprintf(htmode, 5, "????");
438 }
439
440 mcs[used_mcs] = '\0';
441 htmode[used_htmode] = '\0';
442
443 len += snprintf(buf + len, max - len,
444 "%6s %6s %3u.%d: "
445 "%10u %10u %10u %10u\n",
446 htmode,
447 mcs,
448 ratekbps / 1000,
449 (ratekbps % 1000) / 100,
450 stats->success,
451 stats->retries,
452 stats->xretries,
453 stats->per);
454 }
455
456 if (len > max)
457 len = max;
458
459 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
460 kfree(buf);
461 return retval;
462}
463
464static const struct file_operations fops_rcstat = {
465 .read = read_file_rcstat,
466 .open = ath9k_debugfs_open,
467 .owner = THIS_MODULE
468};
469
470static const char * ath_wiphy_state_str(enum ath_wiphy_state state) 381static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
471{ 382{
472 switch (state) { 383 switch (state) {
@@ -488,26 +399,20 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
488 size_t count, loff_t *ppos) 399 size_t count, loff_t *ppos)
489{ 400{
490 struct ath_softc *sc = file->private_data; 401 struct ath_softc *sc = file->private_data;
402 struct ath_wiphy *aphy = sc->pri_wiphy;
403 struct ieee80211_channel *chan = aphy->hw->conf.channel;
491 char buf[512]; 404 char buf[512];
492 unsigned int len = 0; 405 unsigned int len = 0;
493 int i; 406 int i;
494 u8 addr[ETH_ALEN]; 407 u8 addr[ETH_ALEN];
408 u32 tmp;
495 409
496 len += snprintf(buf + len, sizeof(buf) - len, 410 len += snprintf(buf + len, sizeof(buf) - len,
497 "primary: %s (%s chan=%d ht=%d)\n", 411 "primary: %s (%s chan=%d ht=%d)\n",
498 wiphy_name(sc->pri_wiphy->hw->wiphy), 412 wiphy_name(sc->pri_wiphy->hw->wiphy),
499 ath_wiphy_state_str(sc->pri_wiphy->state), 413 ath_wiphy_state_str(sc->pri_wiphy->state),
500 sc->pri_wiphy->chan_idx, sc->pri_wiphy->chan_is_ht); 414 ieee80211_frequency_to_channel(chan->center_freq),
501 for (i = 0; i < sc->num_sec_wiphy; i++) { 415 aphy->chan_is_ht);
502 struct ath_wiphy *aphy = sc->sec_wiphy[i];
503 if (aphy == NULL)
504 continue;
505 len += snprintf(buf + len, sizeof(buf) - len,
506 "secondary: %s (%s chan=%d ht=%d)\n",
507 wiphy_name(aphy->hw->wiphy),
508 ath_wiphy_state_str(aphy->state),
509 aphy->chan_idx, aphy->chan_is_ht);
510 }
511 416
512 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); 417 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
513 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); 418 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -517,7 +422,51 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
517 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4); 422 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
518 len += snprintf(buf + len, sizeof(buf) - len, 423 len += snprintf(buf + len, sizeof(buf) - len,
519 "addrmask: %pM\n", addr); 424 "addrmask: %pM\n", addr);
520 425 tmp = ath9k_hw_getrxfilter(sc->sc_ah);
426 len += snprintf(buf + len, sizeof(buf) - len,
427 "rfilt: 0x%x", tmp);
428 if (tmp & ATH9K_RX_FILTER_UCAST)
429 len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
430 if (tmp & ATH9K_RX_FILTER_MCAST)
431 len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
432 if (tmp & ATH9K_RX_FILTER_BCAST)
433 len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
434 if (tmp & ATH9K_RX_FILTER_CONTROL)
435 len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
436 if (tmp & ATH9K_RX_FILTER_BEACON)
437 len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
438 if (tmp & ATH9K_RX_FILTER_PROM)
439 len += snprintf(buf + len, sizeof(buf) - len, " PROM");
440 if (tmp & ATH9K_RX_FILTER_PROBEREQ)
441 len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
442 if (tmp & ATH9K_RX_FILTER_PHYERR)
443 len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
444 if (tmp & ATH9K_RX_FILTER_MYBEACON)
445 len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
446 if (tmp & ATH9K_RX_FILTER_COMP_BAR)
447 len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
448 if (tmp & ATH9K_RX_FILTER_PSPOLL)
449 len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
450 if (tmp & ATH9K_RX_FILTER_PHYRADAR)
451 len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
452 if (tmp & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
453 len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL\n");
454 else
455 len += snprintf(buf + len, sizeof(buf) - len, "\n");
456
457 /* Put variable-length stuff down here, and check for overflows. */
458 for (i = 0; i < sc->num_sec_wiphy; i++) {
459 struct ath_wiphy *aphy = sc->sec_wiphy[i];
460 if (aphy == NULL)
461 continue;
462 chan = aphy->hw->conf.channel;
463 len += snprintf(buf + len, sizeof(buf) - len,
464 "secondary: %s (%s chan=%d ht=%d)\n",
465 wiphy_name(aphy->hw->wiphy),
466 ath_wiphy_state_str(aphy->state),
467 ieee80211_frequency_to_channel(chan->center_freq),
468 aphy->chan_is_ht);
469 }
521 if (len > sizeof(buf)) 470 if (len > sizeof(buf))
522 len = sizeof(buf); 471 len = sizeof(buf);
523 472
@@ -663,6 +612,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
663 PR("DESC CFG Error: ", desc_cfg_err); 612 PR("DESC CFG Error: ", desc_cfg_err);
664 PR("DATA Underrun: ", data_underrun); 613 PR("DATA Underrun: ", data_underrun);
665 PR("DELIM Underrun: ", delim_underrun); 614 PR("DELIM Underrun: ", delim_underrun);
615 PR("TX-Pkts-All: ", tx_pkts_all);
616 PR("TX-Bytes-All: ", tx_bytes_all);
666 617
667 if (len > size) 618 if (len > size)
668 len = size; 619 len = size;
@@ -676,6 +627,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
676void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 627void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
677 struct ath_buf *bf, struct ath_tx_status *ts) 628 struct ath_buf *bf, struct ath_tx_status *ts)
678{ 629{
630 TX_STAT_INC(txq->axq_qnum, tx_pkts_all);
631 sc->debug.stats.txstats[txq->axq_qnum].tx_bytes_all += bf->bf_mpdu->len;
632
679 if (bf_isampdu(bf)) { 633 if (bf_isampdu(bf)) {
680 if (bf_isxretried(bf)) 634 if (bf_isxretried(bf))
681 TX_STAT_INC(txq->axq_qnum, a_xretries); 635 TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -770,6 +724,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
770 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); 724 PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
771 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL); 725 PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
772 726
727 len += snprintf(buf + len, size - len,
728 "%18s : %10u\n", "RX-Pkts-All",
729 sc->debug.stats.rxstats.rx_pkts_all);
730 len += snprintf(buf + len, size - len,
731 "%18s : %10u\n", "RX-Bytes-All",
732 sc->debug.stats.rxstats.rx_bytes_all);
733
773 if (len > size) 734 if (len > size)
774 len = size; 735 len = size;
775 736
@@ -788,6 +749,9 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
788 749
789 u32 phyerr; 750 u32 phyerr;
790 751
752 RX_STAT_INC(rx_pkts_all);
753 sc->debug.stats.rxstats.rx_bytes_all += rs->rs_datalen;
754
791 if (rs->rs_status & ATH9K_RXERR_CRC) 755 if (rs->rs_status & ATH9K_RXERR_CRC)
792 RX_STAT_INC(crc_err); 756 RX_STAT_INC(crc_err);
793 if (rs->rs_status & ATH9K_RXERR_DECRYPT) 757 if (rs->rs_status & ATH9K_RXERR_DECRYPT)
@@ -924,10 +888,6 @@ int ath9k_init_debug(struct ath_hw *ah)
924 sc, &fops_interrupt)) 888 sc, &fops_interrupt))
925 goto err; 889 goto err;
926 890
927 if (!debugfs_create_file("rcstat", S_IRUSR, sc->debug.debugfs_phy,
928 sc, &fops_rcstat))
929 goto err;
930
931 if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR, 891 if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR,
932 sc->debug.debugfs_phy, sc, &fops_wiphy)) 892 sc->debug.debugfs_phy, sc, &fops_wiphy))
933 goto err; 893 goto err;
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 5d21704e87ff..bb0823242ba0 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -80,15 +80,12 @@ struct ath_interrupt_stats {
80 u32 bb_watchdog; 80 u32 bb_watchdog;
81}; 81};
82 82
83struct ath_rc_stats {
84 u32 success;
85 u32 retries;
86 u32 xretries;
87 u8 per;
88};
89
90/** 83/**
91 * struct ath_tx_stats - Statistics about TX 84 * struct ath_tx_stats - Statistics about TX
85 * @tx_pkts_all: No. of total frames transmitted, including ones that
86 may have had errors.
87 * @tx_bytes_all: No. of total bytes transmitted, including ones that
88 may have had errors.
92 * @queued: Total MPDUs (non-aggr) queued 89 * @queued: Total MPDUs (non-aggr) queued
93 * @completed: Total MPDUs (non-aggr) completed 90 * @completed: Total MPDUs (non-aggr) completed
94 * @a_aggr: Total no. of aggregates queued 91 * @a_aggr: Total no. of aggregates queued
@@ -107,6 +104,8 @@ struct ath_rc_stats {
107 * @delim_urn: TX delimiter underrun errors 104 * @delim_urn: TX delimiter underrun errors
108 */ 105 */
109struct ath_tx_stats { 106struct ath_tx_stats {
107 u32 tx_pkts_all;
108 u32 tx_bytes_all;
110 u32 queued; 109 u32 queued;
111 u32 completed; 110 u32 completed;
112 u32 a_aggr; 111 u32 a_aggr;
@@ -124,6 +123,10 @@ struct ath_tx_stats {
124 123
125/** 124/**
126 * struct ath_rx_stats - RX Statistics 125 * struct ath_rx_stats - RX Statistics
126 * @rx_pkts_all: No. of total frames received, including ones that
127 may have had errors.
128 * @rx_bytes_all: No. of total bytes received, including ones that
129 may have had errors.
127 * @crc_err: No. of frames with incorrect CRC value 130 * @crc_err: No. of frames with incorrect CRC value
128 * @decrypt_crc_err: No. of frames whose CRC check failed after 131 * @decrypt_crc_err: No. of frames whose CRC check failed after
129 decryption process completed 132 decryption process completed
@@ -136,6 +139,8 @@ struct ath_tx_stats {
136 * @phy_err_stats: Individual PHY error statistics 139 * @phy_err_stats: Individual PHY error statistics
137 */ 140 */
138struct ath_rx_stats { 141struct ath_rx_stats {
142 u32 rx_pkts_all;
143 u32 rx_bytes_all;
139 u32 crc_err; 144 u32 crc_err;
140 u32 decrypt_crc_err; 145 u32 decrypt_crc_err;
141 u32 phy_err; 146 u32 phy_err;
@@ -148,7 +153,6 @@ struct ath_rx_stats {
148 153
149struct ath_stats { 154struct ath_stats {
150 struct ath_interrupt_stats istats; 155 struct ath_interrupt_stats istats;
151 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
152 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES]; 156 struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
153 struct ath_rx_stats rxstats; 157 struct ath_rx_stats rxstats;
154}; 158};
@@ -165,12 +169,9 @@ void ath9k_exit_debug(struct ath_hw *ah);
165int ath9k_debug_create_root(void); 169int ath9k_debug_create_root(void);
166void ath9k_debug_remove_root(void); 170void ath9k_debug_remove_root(void);
167void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 171void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
168void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 172void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
170 struct ath_buf *bf, struct ath_tx_status *ts); 173 struct ath_buf *bf, struct ath_tx_status *ts);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); 174void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
172void ath_debug_stat_retries(struct ath_softc *sc, int rix,
173 int xretries, int retries, u8 per);
174 175
175#else 176#else
176 177
@@ -197,11 +198,6 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
197{ 198{
198} 199}
199 200
200static inline void ath_debug_stat_rc(struct ath_softc *sc,
201 int final_rate)
202{
203}
204
205static inline void ath_debug_stat_tx(struct ath_softc *sc, 201static inline void ath_debug_stat_tx(struct ath_softc *sc,
206 struct ath_txq *txq, 202 struct ath_txq *txq,
207 struct ath_buf *bf, 203 struct ath_buf *bf,
@@ -214,11 +210,6 @@ static inline void ath_debug_stat_rx(struct ath_softc *sc,
214{ 210{
215} 211}
216 212
217static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
218 int xretries, int retries, u8 per)
219{
220}
221
222#endif /* CONFIG_ATH9K_DEBUGFS */ 213#endif /* CONFIG_ATH9K_DEBUGFS */
223 214
224#endif /* DEBUG_H */ 215#endif /* DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 0b09db0f8e7d..dacb45e1b906 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -101,7 +101,7 @@
101#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) 101#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
102#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \ 102#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
103 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 103 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
104#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_10_OR_LATER(ah) && \ 104#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
105 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) 105 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
106 106
107#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c 107#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
@@ -266,6 +266,8 @@ enum eeprom_param {
266 EEP_INTERNAL_REGULATOR, 266 EEP_INTERNAL_REGULATOR,
267 EEP_SWREG, 267 EEP_SWREG,
268 EEP_PAPRD, 268 EEP_PAPRD,
269 EEP_MODAL_VER,
270 EEP_ANT_DIV_CTL1,
269}; 271};
270 272
271enum ar5416_rates { 273enum ar5416_rates {
@@ -670,7 +672,8 @@ struct eeprom_ops {
670 bool (*fill_eeprom)(struct ath_hw *hw); 672 bool (*fill_eeprom)(struct ath_hw *hw);
671 int (*get_eeprom_ver)(struct ath_hw *hw); 673 int (*get_eeprom_ver)(struct ath_hw *hw);
672 int (*get_eeprom_rev)(struct ath_hw *hw); 674 int (*get_eeprom_rev)(struct ath_hw *hw);
673 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); 675 u8 (*get_num_ant_config)(struct ath_hw *hw,
676 enum ath9k_hal_freq_band band);
674 u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, 677 u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
675 struct ath9k_channel *chan); 678 struct ath9k_channel *chan);
676 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); 679 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9cccd12e8f21..4fa4d8e28c64 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -179,6 +179,9 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
179 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; 179 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
180 struct modal_eep_4k_header *pModal = &eep->modalHeader; 180 struct modal_eep_4k_header *pModal = &eep->modalHeader;
181 struct base_eep_header_4k *pBase = &eep->baseEepHeader; 181 struct base_eep_header_4k *pBase = &eep->baseEepHeader;
182 u16 ver_minor;
183
184 ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK;
182 185
183 switch (param) { 186 switch (param) {
184 case EEP_NFTHRESH_2: 187 case EEP_NFTHRESH_2:
@@ -204,7 +207,7 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
204 case EEP_DB_2: 207 case EEP_DB_2:
205 return pModal->db1_1; 208 return pModal->db1_1;
206 case EEP_MINOR_REV: 209 case EEP_MINOR_REV:
207 return pBase->version & AR5416_EEP_VER_MINOR_MASK; 210 return ver_minor;
208 case EEP_TX_MASK: 211 case EEP_TX_MASK:
209 return pBase->txMask; 212 return pBase->txMask;
210 case EEP_RX_MASK: 213 case EEP_RX_MASK:
@@ -213,6 +216,15 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
213 return 0; 216 return 0;
214 case EEP_PWR_TABLE_OFFSET: 217 case EEP_PWR_TABLE_OFFSET:
215 return AR5416_PWR_TABLE_OFFSET_DB; 218 return AR5416_PWR_TABLE_OFFSET_DB;
219 case EEP_MODAL_VER:
220 return pModal->version;
221 case EEP_ANT_DIV_CTL1:
222 return pModal->antdiv_ctl1;
223 case EEP_TXGAIN_TYPE:
224 if (ver_minor >= AR5416_EEP_MINOR_VER_19)
225 return pBase->txGainType;
226 else
227 return AR5416_EEP_TXGAIN_ORIGINAL;
216 default: 228 default:
217 return 0; 229 return 0;
218 } 230 }
@@ -329,7 +341,7 @@ static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
329 } 341 }
330 342
331 if (i == 0) { 343 if (i == 0) {
332 if (AR_SREV_9280_10_OR_LATER(ah)) 344 if (AR_SREV_9280_20_OR_LATER(ah))
333 ss = (int16_t)(0 - (minPwrT4[i] / 2)); 345 ss = (int16_t)(0 - (minPwrT4[i] / 2));
334 else 346 else
335 ss = 0; 347 ss = 0;
@@ -496,7 +508,6 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
496 } 508 }
497 509
498 REGWRITE_BUFFER_FLUSH(ah); 510 REGWRITE_BUFFER_FLUSH(ah);
499 DISABLE_REGWRITE_BUFFER(ah);
500 } 511 }
501 } 512 }
502 513
@@ -757,7 +768,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
757 768
758 regulatory->max_power_level = ratesArray[i]; 769 regulatory->max_power_level = ratesArray[i];
759 770
760 if (AR_SREV_9280_10_OR_LATER(ah)) { 771 if (AR_SREV_9280_20_OR_LATER(ah)) {
761 for (i = 0; i < Ar5416RateSize; i++) 772 for (i = 0; i < Ar5416RateSize; i++)
762 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; 773 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
763 } 774 }
@@ -828,7 +839,6 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
828 } 839 }
829 840
830 REGWRITE_BUFFER_FLUSH(ah); 841 REGWRITE_BUFFER_FLUSH(ah);
831 DISABLE_REGWRITE_BUFFER(ah);
832} 842}
833 843
834static void ath9k_hw_4k_set_addac(struct ath_hw *ah, 844static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
@@ -905,9 +915,6 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
905 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 915 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
906 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, 916 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000,
907 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); 917 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
908
909 if (AR_SREV_9285_11(ah))
910 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
911} 918}
912 919
913/* 920/*
@@ -1105,9 +1112,6 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1105 } 1112 }
1106 1113
1107 1114
1108 if (AR_SREV_9285_11(ah))
1109 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
1110
1111 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, 1115 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
1112 pModal->switchSettling); 1116 pModal->switchSettling);
1113 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, 1117 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
@@ -1157,7 +1161,7 @@ static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
1157} 1161}
1158 1162
1159static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah, 1163static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
1160 enum ieee80211_band freq_band) 1164 enum ath9k_hal_freq_band freq_band)
1161{ 1165{
1162 return 1; 1166 return 1;
1163} 1167}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index dff2da777312..966b9496a9dd 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -324,7 +324,7 @@ static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah,
324 minDelta = 0; 324 minDelta = 0;
325 325
326 if (i == 0) { 326 if (i == 0) {
327 if (AR_SREV_9280_10_OR_LATER(ah)) 327 if (AR_SREV_9280_20_OR_LATER(ah))
328 ss = (int16_t)(0 - (minPwrT4[i] / 2)); 328 ss = (int16_t)(0 - (minPwrT4[i] / 2));
329 else 329 else
330 ss = 0; 330 ss = 0;
@@ -883,7 +883,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
883 ratesArray[i] = AR9287_MAX_RATE_POWER; 883 ratesArray[i] = AR9287_MAX_RATE_POWER;
884 } 884 }
885 885
886 if (AR_SREV_9280_10_OR_LATER(ah)) { 886 if (AR_SREV_9280_20_OR_LATER(ah)) {
887 for (i = 0; i < Ar5416RateSize; i++) 887 for (i = 0; i < Ar5416RateSize; i++)
888 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; 888 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
889 } 889 }
@@ -977,7 +977,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
977 else 977 else
978 i = rate6mb; 978 i = rate6mb;
979 979
980 if (AR_SREV_9280_10_OR_LATER(ah)) 980 if (AR_SREV_9280_20_OR_LATER(ah))
981 regulatory->max_power_level = 981 regulatory->max_power_level =
982 ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2; 982 ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
983 else 983 else
@@ -1126,7 +1126,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
1126} 1126}
1127 1127
1128static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah, 1128static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
1129 enum ieee80211_band freq_band) 1129 enum ath9k_hal_freq_band freq_band)
1130{ 1130{
1131 return 1; 1131 return 1;
1132} 1132}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index afa2b73ddbdd..76b4d65472dd 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -223,7 +223,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
223 } 223 }
224 224
225 /* Enable fixup for AR_AN_TOP2 if necessary */ 225 /* Enable fixup for AR_AN_TOP2 if necessary */
226 if (AR_SREV_9280_10_OR_LATER(ah) && 226 if (AR_SREV_9280_20_OR_LATER(ah) &&
227 (eep->baseEepHeader.version & 0xff) > 0x0a && 227 (eep->baseEepHeader.version & 0xff) > 0x0a &&
228 eep->baseEepHeader.pwdclkind == 0) 228 eep->baseEepHeader.pwdclkind == 0)
229 ah->need_an_top2_fixup = 1; 229 ah->need_an_top2_fixup = 1;
@@ -317,7 +317,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
317 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { 317 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
318 txRxAttenLocal = pModal->txRxAttenCh[i]; 318 txRxAttenLocal = pModal->txRxAttenCh[i];
319 319
320 if (AR_SREV_9280_10_OR_LATER(ah)) { 320 if (AR_SREV_9280_20_OR_LATER(ah)) {
321 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 321 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
322 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, 322 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
323 pModal->bswMargin[i]); 323 pModal->bswMargin[i]);
@@ -344,7 +344,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
344 } 344 }
345 } 345 }
346 346
347 if (AR_SREV_9280_10_OR_LATER(ah)) { 347 if (AR_SREV_9280_20_OR_LATER(ah)) {
348 REG_RMW_FIELD(ah, 348 REG_RMW_FIELD(ah,
349 AR_PHY_RXGAIN + regChainOffset, 349 AR_PHY_RXGAIN + regChainOffset,
350 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 350 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
@@ -408,7 +408,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
408 regChainOffset, i); 408 regChainOffset, i);
409 } 409 }
410 410
411 if (AR_SREV_9280_10_OR_LATER(ah)) { 411 if (AR_SREV_9280_20_OR_LATER(ah)) {
412 if (IS_CHAN_2GHZ(chan)) { 412 if (IS_CHAN_2GHZ(chan)) {
413 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0, 413 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
414 AR_AN_RF2G1_CH0_OB, 414 AR_AN_RF2G1_CH0_OB,
@@ -461,7 +461,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
461 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, 461 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
462 pModal->adcDesiredSize); 462 pModal->adcDesiredSize);
463 463
464 if (!AR_SREV_9280_10_OR_LATER(ah)) 464 if (!AR_SREV_9280_20_OR_LATER(ah))
465 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, 465 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
466 AR_PHY_DESIRED_SZ_PGA, 466 AR_PHY_DESIRED_SZ_PGA,
467 pModal->pgaDesiredSize); 467 pModal->pgaDesiredSize);
@@ -478,7 +478,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
478 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 478 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
479 pModal->txEndToRxOn); 479 pModal->txEndToRxOn);
480 480
481 if (AR_SREV_9280_10_OR_LATER(ah)) { 481 if (AR_SREV_9280_20_OR_LATER(ah)) {
482 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, 482 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
483 pModal->thresh62); 483 pModal->thresh62);
484 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, 484 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
@@ -696,7 +696,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
696 } 696 }
697 697
698 if (i == 0) { 698 if (i == 0) {
699 if (AR_SREV_9280_10_OR_LATER(ah)) 699 if (AR_SREV_9280_20_OR_LATER(ah))
700 ss = (int16_t)(0 - (minPwrT4[i] / 2)); 700 ss = (int16_t)(0 - (minPwrT4[i] / 2));
701 else 701 else
702 ss = 0; 702 ss = 0;
@@ -1291,7 +1291,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1291 ratesArray[i] = AR5416_MAX_RATE_POWER; 1291 ratesArray[i] = AR5416_MAX_RATE_POWER;
1292 } 1292 }
1293 1293
1294 if (AR_SREV_9280_10_OR_LATER(ah)) { 1294 if (AR_SREV_9280_20_OR_LATER(ah)) {
1295 for (i = 0; i < Ar5416RateSize; i++) { 1295 for (i = 0; i < Ar5416RateSize; i++) {
1296 int8_t pwr_table_offset; 1296 int8_t pwr_table_offset;
1297 1297
@@ -1395,7 +1395,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1395 else if (IS_CHAN_HT20(chan)) 1395 else if (IS_CHAN_HT20(chan))
1396 i = rateHt20_0; 1396 i = rateHt20_0;
1397 1397
1398 if (AR_SREV_9280_10_OR_LATER(ah)) 1398 if (AR_SREV_9280_20_OR_LATER(ah))
1399 regulatory->max_power_level = 1399 regulatory->max_power_level =
1400 ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2; 1400 ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
1401 else 1401 else
@@ -1418,11 +1418,11 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1418} 1418}
1419 1419
1420static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, 1420static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
1421 enum ieee80211_band freq_band) 1421 enum ath9k_hal_freq_band freq_band)
1422{ 1422{
1423 struct ar5416_eeprom_def *eep = &ah->eeprom.def; 1423 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1424 struct modal_eep_header *pModal = 1424 struct modal_eep_header *pModal =
1425 &(eep->modalHeader[ATH9K_HAL_FREQ_BAND_2GHZ == freq_band]); 1425 &(eep->modalHeader[freq_band]);
1426 struct base_eep_header *pBase = &eep->baseEepHeader; 1426 struct base_eep_header *pBase = &eep->baseEepHeader;
1427 u8 num_ant_config; 1427 u8 num_ant_config;
1428 1428
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 495f18950ac9..728d904c74d7 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -92,10 +92,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
92 cmd->skb = skb; 92 cmd->skb = skb;
93 cmd->hif_dev = hif_dev; 93 cmd->hif_dev = hif_dev;
94 94
95 usb_fill_int_urb(urb, hif_dev->udev, 95 usb_fill_bulk_urb(urb, hif_dev->udev,
96 usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE), 96 usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
97 skb->data, skb->len, 97 skb->data, skb->len,
98 hif_usb_regout_cb, cmd, 1); 98 hif_usb_regout_cb, cmd);
99 99
100 usb_anchor_urb(urb, &hif_dev->regout_submitted); 100 usb_anchor_urb(urb, &hif_dev->regout_submitted);
101 ret = usb_submit_urb(urb, GFP_KERNEL); 101 ret = usb_submit_urb(urb, GFP_KERNEL);
@@ -541,7 +541,8 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
541 } 541 }
542 542
543 usb_fill_int_urb(urb, hif_dev->udev, 543 usb_fill_int_urb(urb, hif_dev->udev,
544 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), 544 usb_rcvbulkpipe(hif_dev->udev,
545 USB_REG_IN_PIPE),
545 nskb->data, MAX_REG_IN_BUF_SIZE, 546 nskb->data, MAX_REG_IN_BUF_SIZE,
546 ath9k_hif_usb_reg_in_cb, nskb, 1); 547 ath9k_hif_usb_reg_in_cb, nskb, 1);
547 548
@@ -720,7 +721,8 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
720 goto err; 721 goto err;
721 722
722 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, 723 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
723 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), 724 usb_rcvbulkpipe(hif_dev->udev,
725 USB_REG_IN_PIPE),
724 skb->data, MAX_REG_IN_BUF_SIZE, 726 skb->data, MAX_REG_IN_BUF_SIZE,
725 ath9k_hif_usb_reg_in_cb, skb, 1); 727 ath9k_hif_usb_reg_in_cb, skb, 1);
726 728
@@ -822,7 +824,9 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
822 824
823static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) 825static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
824{ 826{
825 int ret; 827 int ret, idx;
828 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
829 struct usb_endpoint_descriptor *endp;
826 830
827 /* Request firmware */ 831 /* Request firmware */
828 ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name, 832 ret = request_firmware(&hif_dev->firmware, hif_dev->fw_name,
@@ -850,6 +854,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
850 goto err_fw_download; 854 goto err_fw_download;
851 } 855 }
852 856
857 /* On downloading the firmware to the target, the USB descriptor of EP4
858 * is 'patched' to change the type of the endpoint to Bulk. This will
859 * bring down CPU usage during the scan period.
860 */
861 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
862 endp = &alt->endpoint[idx].desc;
863 if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
864 == 0x04) &&
865 ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
866 == USB_ENDPOINT_XFER_INT)) {
867 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
868 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
869 endp->bInterval = 0;
870 }
871 }
872
853 return 0; 873 return 0;
854 874
855err_fw_download: 875err_fw_download:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index bd1506e69105..1b72aa482ac7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -235,7 +235,14 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
235 ath9k_hw_get_txq_props(ah, qnum, &qi_be); 235 ath9k_hw_get_txq_props(ah, qnum, &qi_be);
236 236
237 qi.tqi_aifs = qi_be.tqi_aifs; 237 qi.tqi_aifs = qi_be.tqi_aifs;
238 qi.tqi_cwmin = 4*qi_be.tqi_cwmin; 238 /* For WIFI Beacon Distribution
239 * Long slot time : 2x cwmin
240 * Short slot time : 4x cwmin
241 */
242 if (ah->slottime == ATH9K_SLOT_TIME_20)
243 qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
244 else
245 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
239 qi.tqi_cwmax = qi_be.tqi_cwmax; 246 qi.tqi_cwmax = qi_be.tqi_cwmax;
240 247
241 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) { 248 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 695e2b088d10..3d7b97f1b3ae 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -380,15 +380,6 @@ static void ath9k_enable_regwrite_buffer(void *hw_priv)
380 atomic_inc(&priv->wmi->mwrite_cnt); 380 atomic_inc(&priv->wmi->mwrite_cnt);
381} 381}
382 382
383static void ath9k_disable_regwrite_buffer(void *hw_priv)
384{
385 struct ath_hw *ah = (struct ath_hw *) hw_priv;
386 struct ath_common *common = ath9k_hw_common(ah);
387 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
388
389 atomic_dec(&priv->wmi->mwrite_cnt);
390}
391
392static void ath9k_regwrite_flush(void *hw_priv) 383static void ath9k_regwrite_flush(void *hw_priv)
393{ 384{
394 struct ath_hw *ah = (struct ath_hw *) hw_priv; 385 struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -397,6 +388,8 @@ static void ath9k_regwrite_flush(void *hw_priv)
397 u32 rsp_status; 388 u32 rsp_status;
398 int r; 389 int r;
399 390
391 atomic_dec(&priv->wmi->mwrite_cnt);
392
400 mutex_lock(&priv->wmi->multi_write_mutex); 393 mutex_lock(&priv->wmi->multi_write_mutex);
401 394
402 if (priv->wmi->multi_write_idx) { 395 if (priv->wmi->multi_write_idx) {
@@ -420,7 +413,6 @@ static const struct ath_ops ath9k_common_ops = {
420 .read = ath9k_regread, 413 .read = ath9k_regread,
421 .write = ath9k_regwrite, 414 .write = ath9k_regwrite,
422 .enable_write_buffer = ath9k_enable_regwrite_buffer, 415 .enable_write_buffer = ath9k_enable_regwrite_buffer,
423 .disable_write_buffer = ath9k_disable_regwrite_buffer,
424 .write_flush = ath9k_regwrite_flush, 416 .write_flush = ath9k_regwrite_flush,
425}; 417};
426 418
@@ -561,17 +553,20 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
561 common->keymax = ATH_KEYMAX; 553 common->keymax = ATH_KEYMAX;
562 } 554 }
563 555
556 if (priv->ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
557 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
558
564 /* 559 /*
565 * Reset the key cache since some parts do not 560 * Reset the key cache since some parts do not
566 * reset the contents on initial power up. 561 * reset the contents on initial power up.
567 */ 562 */
568 for (i = 0; i < common->keymax; i++) 563 for (i = 0; i < common->keymax; i++)
569 ath9k_hw_keyreset(priv->ah, (u16) i); 564 ath_hw_keyreset(common, (u16) i);
570} 565}
571 566
572static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv) 567static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
573{ 568{
574 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) { 569 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
575 priv->sbands[IEEE80211_BAND_2GHZ].channels = 570 priv->sbands[IEEE80211_BAND_2GHZ].channels =
576 ath9k_2ghz_channels; 571 ath9k_2ghz_channels;
577 priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; 572 priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
@@ -582,7 +577,7 @@ static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
582 ARRAY_SIZE(ath9k_legacy_rates); 577 ARRAY_SIZE(ath9k_legacy_rates);
583 } 578 }
584 579
585 if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) { 580 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
586 priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels; 581 priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
587 priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; 582 priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
588 priv->sbands[IEEE80211_BAND_5GHZ].n_channels = 583 priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
@@ -601,8 +596,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
601 common->tx_chainmask = priv->ah->caps.tx_chainmask; 596 common->tx_chainmask = priv->ah->caps.tx_chainmask;
602 common->rx_chainmask = priv->ah->caps.rx_chainmask; 597 common->rx_chainmask = priv->ah->caps.rx_chainmask;
603 598
604 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 599 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
605 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
606 600
607 priv->ah->opmode = NL80211_IFTYPE_STATION; 601 priv->ah->opmode = NL80211_IFTYPE_STATION;
608} 602}
@@ -746,18 +740,18 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
746 hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) + 740 hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
747 sizeof(struct htc_frame_hdr) + 4; 741 sizeof(struct htc_frame_hdr) + 4;
748 742
749 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) 743 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
750 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 744 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
751 &priv->sbands[IEEE80211_BAND_2GHZ]; 745 &priv->sbands[IEEE80211_BAND_2GHZ];
752 if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) 746 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
753 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 747 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
754 &priv->sbands[IEEE80211_BAND_5GHZ]; 748 &priv->sbands[IEEE80211_BAND_5GHZ];
755 749
756 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 750 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
757 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) 751 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
758 setup_ht_cap(priv, 752 setup_ht_cap(priv,
759 &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap); 753 &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
760 if (test_bit(ATH9K_MODE_11A, priv->ah->caps.wireless_modes)) 754 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
761 setup_ht_cap(priv, 755 setup_ht_cap(priv,
762 &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap); 756 &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
763 } 757 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f4672073ac0a..55c80866dfc6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -137,8 +137,6 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
137 if (priv->op_flags & OP_FULL_RESET) 137 if (priv->op_flags & OP_FULL_RESET)
138 fastcc = false; 138 fastcc = false;
139 139
140 /* Fiddle around with fastcc later on, for now just use full reset */
141 fastcc = false;
142 ath9k_htc_ps_wakeup(priv); 140 ath9k_htc_ps_wakeup(priv);
143 htc_stop(priv->htc); 141 htc_stop(priv->htc);
144 WMI_CMD(WMI_DISABLE_INTR_CMDID); 142 WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -146,9 +144,10 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
146 WMI_CMD(WMI_STOP_RECV_CMDID); 144 WMI_CMD(WMI_STOP_RECV_CMDID);
147 145
148 ath_print(common, ATH_DBG_CONFIG, 146 ath_print(common, ATH_DBG_CONFIG,
149 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n", 147 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
150 priv->ah->curchan->channel, 148 priv->ah->curchan->channel,
151 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf)); 149 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
150 fastcc);
152 151
153 caldata = &priv->caldata[channel->hw_value]; 152 caldata = &priv->caldata[channel->hw_value];
154 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); 153 ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
@@ -761,23 +760,12 @@ void ath9k_ani_work(struct work_struct *work)
761 ath9k_hw_ani_monitor(ah, ah->curchan); 760 ath9k_hw_ani_monitor(ah, ah->curchan);
762 761
763 /* Perform calibration if necessary */ 762 /* Perform calibration if necessary */
764 if (longcal || shortcal) { 763 if (longcal || shortcal)
765 common->ani.caldone = 764 common->ani.caldone =
766 ath9k_hw_calibrate(ah, ah->curchan, 765 ath9k_hw_calibrate(ah, ah->curchan,
767 common->rx_chainmask, 766 common->rx_chainmask,
768 longcal); 767 longcal);
769 768
770 if (longcal)
771 common->ani.noise_floor =
772 ath9k_hw_getchan_noise(ah, ah->curchan);
773
774 ath_print(common, ATH_DBG_ANI,
775 " calibrate chan %u/%x nf: %d\n",
776 ah->curchan->channel,
777 ah->curchan->channelFlags,
778 common->ani.noise_floor);
779 }
780
781 ath9k_htc_ps_restore(priv); 769 ath9k_htc_ps_restore(priv);
782 } 770 }
783 771
@@ -1466,6 +1454,7 @@ out:
1466 FIF_PSPOLL | \ 1454 FIF_PSPOLL | \
1467 FIF_OTHER_BSS | \ 1455 FIF_OTHER_BSS | \
1468 FIF_BCN_PRBRESP_PROMISC | \ 1456 FIF_BCN_PRBRESP_PROMISC | \
1457 FIF_PROBE_REQ | \
1469 FIF_FCSFAIL) 1458 FIF_FCSFAIL)
1470 1459
1471static void ath9k_htc_configure_filter(struct ieee80211_hw *hw, 1460static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
@@ -1591,7 +1580,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1591 1580
1592 switch (cmd) { 1581 switch (cmd) {
1593 case SET_KEY: 1582 case SET_KEY:
1594 ret = ath9k_cmn_key_config(common, vif, sta, key); 1583 ret = ath_key_config(common, vif, sta, key);
1595 if (ret >= 0) { 1584 if (ret >= 0) {
1596 key->hw_key_idx = ret; 1585 key->hw_key_idx = ret;
1597 /* push IV and Michael MIC generation to stack */ 1586 /* push IV and Michael MIC generation to stack */
@@ -1605,7 +1594,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1605 } 1594 }
1606 break; 1595 break;
1607 case DISABLE_KEY: 1596 case DISABLE_KEY:
1608 ath9k_cmn_key_delete(common, key); 1597 ath_key_delete(common, key);
1609 break; 1598 break;
1610 default: 1599 default:
1611 ret = -EINVAL; 1600 ret = -EINVAL;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2a6e45a293a9..3d19b5bc937f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -369,8 +369,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
369 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 369 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
370 | ATH9K_RX_FILTER_MCAST; 370 | ATH9K_RX_FILTER_MCAST;
371 371
372 /* If not a STA, enable processing of Probe Requests */ 372 if (priv->rxfilter & FIF_PROBE_REQ)
373 if (ah->opmode != NL80211_IFTYPE_STATION)
374 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 373 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
375 374
376 /* 375 /*
@@ -415,8 +414,7 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
415 ath9k_hw_setrxfilter(ah, rfilt); 414 ath9k_hw_setrxfilter(ah, rfilt);
416 415
417 /* configure bssid mask */ 416 /* configure bssid mask */
418 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 417 ath_hw_setbssidmask(common);
419 ath_hw_setbssidmask(common);
420 418
421 /* configure operational mode */ 419 /* configure operational mode */
422 ath9k_hw_setopmode(ah); 420 ath9k_hw_setopmode(ah);
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index ffecbadaea4a..0a4ad348b699 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -128,17 +128,6 @@ static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf); 128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
129} 129}
130 130
131static inline void ath9k_hw_procmibevent(struct ath_hw *ah)
132{
133 ath9k_hw_ops(ah)->ani_proc_mib_event(ah);
134}
135
136static inline void ath9k_hw_ani_monitor(struct ath_hw *ah,
137 struct ath9k_channel *chan)
138{
139 ath9k_hw_ops(ah)->ani_monitor(ah, chan);
140}
141
142/* Private hardware call ops */ 131/* Private hardware call ops */
143 132
144/* PHY ops */ 133/* PHY ops */
@@ -276,15 +265,4 @@ static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
276 ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal); 265 ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
277} 266}
278 267
279static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
280 enum ath9k_cal_types calType)
281{
282 return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
283}
284
285static inline void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
286{
287 ath9k_hw_private_ops(ah)->ani_reset(ah, is_scanning);
288}
289
290#endif /* ATH9K_HW_OPS_H */ 268#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 3384ca164562..cc13ee117823 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -88,29 +88,32 @@ static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
88/* Helper Functions */ 88/* Helper Functions */
89/********************/ 89/********************/
90 90
91static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) 91static void ath9k_hw_set_clockrate(struct ath_hw *ah)
92{ 92{
93 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 93 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
94 struct ath_common *common = ath9k_hw_common(ah);
95 unsigned int clockrate;
94 96
95 if (!ah->curchan) /* should really check for CCK instead */ 97 if (!ah->curchan) /* should really check for CCK instead */
96 return usecs *ATH9K_CLOCK_RATE_CCK; 98 clockrate = ATH9K_CLOCK_RATE_CCK;
97 if (conf->channel->band == IEEE80211_BAND_2GHZ) 99 else if (conf->channel->band == IEEE80211_BAND_2GHZ)
98 return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM; 100 clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
99 101 else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
100 if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK) 102 clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
101 return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
102 else 103 else
103 return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM; 104 clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
105
106 if (conf_is_ht40(conf))
107 clockrate *= 2;
108
109 common->clockrate = clockrate;
104} 110}
105 111
106static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) 112static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
107{ 113{
108 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 114 struct ath_common *common = ath9k_hw_common(ah);
109 115
110 if (conf_is_ht40(conf)) 116 return usecs * common->clockrate;
111 return ath9k_hw_mac_clks(ah, usecs) * 2;
112 else
113 return ath9k_hw_mac_clks(ah, usecs);
114} 117}
115 118
116bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) 119bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
@@ -299,7 +302,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
299 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 302 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
300 303
301 REGWRITE_BUFFER_FLUSH(ah); 304 REGWRITE_BUFFER_FLUSH(ah);
302 DISABLE_REGWRITE_BUFFER(ah);
303} 305}
304 306
305/* This should work for all families including legacy */ 307/* This should work for all families including legacy */
@@ -371,10 +373,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
371 ah->config.pcie_clock_req = 0; 373 ah->config.pcie_clock_req = 0;
372 ah->config.pcie_waen = 0; 374 ah->config.pcie_waen = 0;
373 ah->config.analog_shiftreg = 1; 375 ah->config.analog_shiftreg = 1;
374 ah->config.ofdm_trig_low = 200;
375 ah->config.ofdm_trig_high = 500;
376 ah->config.cck_trig_high = 200;
377 ah->config.cck_trig_low = 100;
378 ah->config.enable_ani = true; 376 ah->config.enable_ani = true;
379 377
380 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 378 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
@@ -565,7 +563,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
565 ath9k_hw_init_cal_settings(ah); 563 ath9k_hw_init_cal_settings(ah);
566 564
567 ah->ani_function = ATH9K_ANI_ALL; 565 ah->ani_function = ATH9K_ANI_ALL;
568 if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah)) 566 if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
569 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 567 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
570 if (!AR_SREV_9300_20_OR_LATER(ah)) 568 if (!AR_SREV_9300_20_OR_LATER(ah))
571 ah->ani_function &= ~ATH9K_ANI_MRC_CCK; 569 ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
@@ -676,7 +674,6 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
676 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 674 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
677 675
678 REGWRITE_BUFFER_FLUSH(ah); 676 REGWRITE_BUFFER_FLUSH(ah);
679 DISABLE_REGWRITE_BUFFER(ah);
680} 677}
681 678
682static void ath9k_hw_init_pll(struct ath_hw *ah, 679static void ath9k_hw_init_pll(struct ath_hw *ah,
@@ -741,7 +738,6 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
741 } 738 }
742 739
743 REGWRITE_BUFFER_FLUSH(ah); 740 REGWRITE_BUFFER_FLUSH(ah);
744 DISABLE_REGWRITE_BUFFER(ah);
745 741
746 if (AR_SREV_9300_20_OR_LATER(ah)) { 742 if (AR_SREV_9300_20_OR_LATER(ah)) {
747 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0); 743 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
@@ -885,7 +881,6 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
885 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); 881 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
886 882
887 REGWRITE_BUFFER_FLUSH(ah); 883 REGWRITE_BUFFER_FLUSH(ah);
888 DISABLE_REGWRITE_BUFFER(ah);
889 884
890 /* 885 /*
891 * Restore TX Trigger Level to its pre-reset value. 886 * Restore TX Trigger Level to its pre-reset value.
@@ -933,7 +928,6 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
933 } 928 }
934 929
935 REGWRITE_BUFFER_FLUSH(ah); 930 REGWRITE_BUFFER_FLUSH(ah);
936 DISABLE_REGWRITE_BUFFER(ah);
937 931
938 if (AR_SREV_9300_20_OR_LATER(ah)) 932 if (AR_SREV_9300_20_OR_LATER(ah))
939 ath9k_hw_reset_txstatus_ring(ah); 933 ath9k_hw_reset_txstatus_ring(ah);
@@ -1031,7 +1025,6 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1031 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1025 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1032 1026
1033 REGWRITE_BUFFER_FLUSH(ah); 1027 REGWRITE_BUFFER_FLUSH(ah);
1034 DISABLE_REGWRITE_BUFFER(ah);
1035 1028
1036 udelay(50); 1029 udelay(50);
1037 1030
@@ -1070,7 +1063,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1070 udelay(2); 1063 udelay(2);
1071 1064
1072 REGWRITE_BUFFER_FLUSH(ah); 1065 REGWRITE_BUFFER_FLUSH(ah);
1073 DISABLE_REGWRITE_BUFFER(ah);
1074 1066
1075 if (!AR_SREV_9300_20_OR_LATER(ah)) 1067 if (!AR_SREV_9300_20_OR_LATER(ah))
1076 udelay(2); 1068 udelay(2);
@@ -1167,6 +1159,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1167 "Failed to set channel\n"); 1159 "Failed to set channel\n");
1168 return false; 1160 return false;
1169 } 1161 }
1162 ath9k_hw_set_clockrate(ah);
1170 1163
1171 ah->eep_ops->set_txpower(ah, chan, 1164 ah->eep_ops->set_txpower(ah, chan,
1172 ath9k_regd_get_ctl(regulatory, chan), 1165 ath9k_regd_get_ctl(regulatory, chan),
@@ -1190,7 +1183,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1190 int count = 50; 1183 int count = 50;
1191 u32 reg; 1184 u32 reg;
1192 1185
1193 if (AR_SREV_9285_10_OR_LATER(ah)) 1186 if (AR_SREV_9285_12_OR_LATER(ah))
1194 return true; 1187 return true;
1195 1188
1196 do { 1189 do {
@@ -1239,7 +1232,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1239 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1232 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1240 return -EIO; 1233 return -EIO;
1241 1234
1242 if (curchan && !ah->chip_fullsleep && ah->caldata) 1235 if (curchan && !ah->chip_fullsleep)
1243 ath9k_hw_getnf(ah, curchan); 1236 ath9k_hw_getnf(ah, curchan);
1244 1237
1245 ah->caldata = caldata; 1238 ah->caldata = caldata;
@@ -1258,11 +1251,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1258 (chan->channel != ah->curchan->channel) && 1251 (chan->channel != ah->curchan->channel) &&
1259 ((chan->channelFlags & CHANNEL_ALL) == 1252 ((chan->channelFlags & CHANNEL_ALL) ==
1260 (ah->curchan->channelFlags & CHANNEL_ALL)) && 1253 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
1261 !AR_SREV_9280(ah)) { 1254 (!AR_SREV_9280(ah) || AR_DEVID_7010(ah))) {
1262 1255
1263 if (ath9k_hw_channel_change(ah, chan)) { 1256 if (ath9k_hw_channel_change(ah, chan)) {
1264 ath9k_hw_loadnf(ah, ah->curchan); 1257 ath9k_hw_loadnf(ah, ah->curchan);
1265 ath9k_hw_start_nfcal(ah, true); 1258 ath9k_hw_start_nfcal(ah, true);
1259 if (AR_SREV_9271(ah))
1260 ar9002_hw_load_ani_reg(ah, chan);
1266 return 0; 1261 return 0;
1267 } 1262 }
1268 } 1263 }
@@ -1310,7 +1305,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1310 if (tsf) 1305 if (tsf)
1311 ath9k_hw_settsf64(ah, tsf); 1306 ath9k_hw_settsf64(ah, tsf);
1312 1307
1313 if (AR_SREV_9280_10_OR_LATER(ah)) 1308 if (AR_SREV_9280_20_OR_LATER(ah))
1314 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); 1309 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
1315 1310
1316 if (!AR_SREV_9300_20_OR_LATER(ah)) 1311 if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -1372,19 +1367,19 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1372 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 1367 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
1373 1368
1374 REGWRITE_BUFFER_FLUSH(ah); 1369 REGWRITE_BUFFER_FLUSH(ah);
1375 DISABLE_REGWRITE_BUFFER(ah);
1376 1370
1377 r = ath9k_hw_rf_set_freq(ah, chan); 1371 r = ath9k_hw_rf_set_freq(ah, chan);
1378 if (r) 1372 if (r)
1379 return r; 1373 return r;
1380 1374
1375 ath9k_hw_set_clockrate(ah);
1376
1381 ENABLE_REGWRITE_BUFFER(ah); 1377 ENABLE_REGWRITE_BUFFER(ah);
1382 1378
1383 for (i = 0; i < AR_NUM_DCU; i++) 1379 for (i = 0; i < AR_NUM_DCU; i++)
1384 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 1380 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
1385 1381
1386 REGWRITE_BUFFER_FLUSH(ah); 1382 REGWRITE_BUFFER_FLUSH(ah);
1387 DISABLE_REGWRITE_BUFFER(ah);
1388 1383
1389 ah->intr_txqs = 0; 1384 ah->intr_txqs = 0;
1390 for (i = 0; i < ah->caps.total_queues; i++) 1385 for (i = 0; i < ah->caps.total_queues; i++)
@@ -1432,7 +1427,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1432 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); 1427 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
1433 1428
1434 REGWRITE_BUFFER_FLUSH(ah); 1429 REGWRITE_BUFFER_FLUSH(ah);
1435 DISABLE_REGWRITE_BUFFER(ah);
1436 1430
1437 /* 1431 /*
1438 * For big endian systems turn on swapping for descriptors 1432 * For big endian systems turn on swapping for descriptors
@@ -1474,283 +1468,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1474} 1468}
1475EXPORT_SYMBOL(ath9k_hw_reset); 1469EXPORT_SYMBOL(ath9k_hw_reset);
1476 1470
1477/************************/
1478/* Key Cache Management */
1479/************************/
1480
1481bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
1482{
1483 u32 keyType;
1484
1485 if (entry >= ah->caps.keycache_size) {
1486 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1487 "keychache entry %u out of range\n", entry);
1488 return false;
1489 }
1490
1491 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
1492
1493 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
1494 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
1495 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
1496 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
1497 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
1498 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
1499 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
1500 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
1501
1502 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
1503 u16 micentry = entry + 64;
1504
1505 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
1506 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
1507 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
1508 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
1509
1510 }
1511
1512 return true;
1513}
1514EXPORT_SYMBOL(ath9k_hw_keyreset);
1515
1516static bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
1517{
1518 u32 macHi, macLo;
1519 u32 unicast_flag = AR_KEYTABLE_VALID;
1520
1521 if (entry >= ah->caps.keycache_size) {
1522 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1523 "keychache entry %u out of range\n", entry);
1524 return false;
1525 }
1526
1527 if (mac != NULL) {
1528 /*
1529 * AR_KEYTABLE_VALID indicates that the address is a unicast
1530 * address, which must match the transmitter address for
1531 * decrypting frames.
1532 * Not setting this bit allows the hardware to use the key
1533 * for multicast frame decryption.
1534 */
1535 if (mac[0] & 0x01)
1536 unicast_flag = 0;
1537
1538 macHi = (mac[5] << 8) | mac[4];
1539 macLo = (mac[3] << 24) |
1540 (mac[2] << 16) |
1541 (mac[1] << 8) |
1542 mac[0];
1543 macLo >>= 1;
1544 macLo |= (macHi & 1) << 31;
1545 macHi >>= 1;
1546 } else {
1547 macLo = macHi = 0;
1548 }
1549 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
1550 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
1551
1552 return true;
1553}
1554
1555bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
1556 const struct ath9k_keyval *k,
1557 const u8 *mac)
1558{
1559 const struct ath9k_hw_capabilities *pCap = &ah->caps;
1560 struct ath_common *common = ath9k_hw_common(ah);
1561 u32 key0, key1, key2, key3, key4;
1562 u32 keyType;
1563
1564 if (entry >= pCap->keycache_size) {
1565 ath_print(common, ATH_DBG_FATAL,
1566 "keycache entry %u out of range\n", entry);
1567 return false;
1568 }
1569
1570 switch (k->kv_type) {
1571 case ATH9K_CIPHER_AES_OCB:
1572 keyType = AR_KEYTABLE_TYPE_AES;
1573 break;
1574 case ATH9K_CIPHER_AES_CCM:
1575 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
1576 ath_print(common, ATH_DBG_ANY,
1577 "AES-CCM not supported by mac rev 0x%x\n",
1578 ah->hw_version.macRev);
1579 return false;
1580 }
1581 keyType = AR_KEYTABLE_TYPE_CCM;
1582 break;
1583 case ATH9K_CIPHER_TKIP:
1584 keyType = AR_KEYTABLE_TYPE_TKIP;
1585 if (ATH9K_IS_MIC_ENABLED(ah)
1586 && entry + 64 >= pCap->keycache_size) {
1587 ath_print(common, ATH_DBG_ANY,
1588 "entry %u inappropriate for TKIP\n", entry);
1589 return false;
1590 }
1591 break;
1592 case ATH9K_CIPHER_WEP:
1593 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
1594 ath_print(common, ATH_DBG_ANY,
1595 "WEP key length %u too small\n", k->kv_len);
1596 return false;
1597 }
1598 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
1599 keyType = AR_KEYTABLE_TYPE_40;
1600 else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
1601 keyType = AR_KEYTABLE_TYPE_104;
1602 else
1603 keyType = AR_KEYTABLE_TYPE_128;
1604 break;
1605 case ATH9K_CIPHER_CLR:
1606 keyType = AR_KEYTABLE_TYPE_CLR;
1607 break;
1608 default:
1609 ath_print(common, ATH_DBG_FATAL,
1610 "cipher %u not supported\n", k->kv_type);
1611 return false;
1612 }
1613
1614 key0 = get_unaligned_le32(k->kv_val + 0);
1615 key1 = get_unaligned_le16(k->kv_val + 4);
1616 key2 = get_unaligned_le32(k->kv_val + 6);
1617 key3 = get_unaligned_le16(k->kv_val + 10);
1618 key4 = get_unaligned_le32(k->kv_val + 12);
1619 if (k->kv_len <= WLAN_KEY_LEN_WEP104)
1620 key4 &= 0xff;
1621
1622 /*
1623 * Note: Key cache registers access special memory area that requires
1624 * two 32-bit writes to actually update the values in the internal
1625 * memory. Consequently, the exact order and pairs used here must be
1626 * maintained.
1627 */
1628
1629 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
1630 u16 micentry = entry + 64;
1631
1632 /*
1633 * Write inverted key[47:0] first to avoid Michael MIC errors
1634 * on frames that could be sent or received at the same time.
1635 * The correct key will be written in the end once everything
1636 * else is ready.
1637 */
1638 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
1639 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
1640
1641 /* Write key[95:48] */
1642 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
1643 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
1644
1645 /* Write key[127:96] and key type */
1646 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
1647 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
1648
1649 /* Write MAC address for the entry */
1650 (void) ath9k_hw_keysetmac(ah, entry, mac);
1651
1652 if (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) {
1653 /*
1654 * TKIP uses two key cache entries:
1655 * Michael MIC TX/RX keys in the same key cache entry
1656 * (idx = main index + 64):
1657 * key0 [31:0] = RX key [31:0]
1658 * key1 [15:0] = TX key [31:16]
1659 * key1 [31:16] = reserved
1660 * key2 [31:0] = RX key [63:32]
1661 * key3 [15:0] = TX key [15:0]
1662 * key3 [31:16] = reserved
1663 * key4 [31:0] = TX key [63:32]
1664 */
1665 u32 mic0, mic1, mic2, mic3, mic4;
1666
1667 mic0 = get_unaligned_le32(k->kv_mic + 0);
1668 mic2 = get_unaligned_le32(k->kv_mic + 4);
1669 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
1670 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
1671 mic4 = get_unaligned_le32(k->kv_txmic + 4);
1672
1673 /* Write RX[31:0] and TX[31:16] */
1674 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
1675 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
1676
1677 /* Write RX[63:32] and TX[15:0] */
1678 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
1679 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
1680
1681 /* Write TX[63:32] and keyType(reserved) */
1682 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
1683 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
1684 AR_KEYTABLE_TYPE_CLR);
1685
1686 } else {
1687 /*
1688 * TKIP uses four key cache entries (two for group
1689 * keys):
1690 * Michael MIC TX/RX keys are in different key cache
1691 * entries (idx = main index + 64 for TX and
1692 * main index + 32 + 96 for RX):
1693 * key0 [31:0] = TX/RX MIC key [31:0]
1694 * key1 [31:0] = reserved
1695 * key2 [31:0] = TX/RX MIC key [63:32]
1696 * key3 [31:0] = reserved
1697 * key4 [31:0] = reserved
1698 *
1699 * Upper layer code will call this function separately
1700 * for TX and RX keys when these registers offsets are
1701 * used.
1702 */
1703 u32 mic0, mic2;
1704
1705 mic0 = get_unaligned_le32(k->kv_mic + 0);
1706 mic2 = get_unaligned_le32(k->kv_mic + 4);
1707
1708 /* Write MIC key[31:0] */
1709 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
1710 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
1711
1712 /* Write MIC key[63:32] */
1713 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
1714 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
1715
1716 /* Write TX[63:32] and keyType(reserved) */
1717 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
1718 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
1719 AR_KEYTABLE_TYPE_CLR);
1720 }
1721
1722 /* MAC address registers are reserved for the MIC entry */
1723 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
1724 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
1725
1726 /*
1727 * Write the correct (un-inverted) key[47:0] last to enable
1728 * TKIP now that all other registers are set with correct
1729 * values.
1730 */
1731 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
1732 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
1733 } else {
1734 /* Write key[47:0] */
1735 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
1736 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
1737
1738 /* Write key[95:48] */
1739 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
1740 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
1741
1742 /* Write key[127:96] and key type */
1743 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
1744 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
1745
1746 /* Write MAC address for the entry */
1747 (void) ath9k_hw_keysetmac(ah, entry, mac);
1748 }
1749
1750 return true;
1751}
1752EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
1753
1754/******************************/ 1471/******************************/
1755/* Power Management (Chipset) */ 1472/* Power Management (Chipset) */
1756/******************************/ 1473/******************************/
@@ -1959,7 +1676,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1959 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); 1676 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
1960 1677
1961 REGWRITE_BUFFER_FLUSH(ah); 1678 REGWRITE_BUFFER_FLUSH(ah);
1962 DISABLE_REGWRITE_BUFFER(ah);
1963 1679
1964 beacon_period &= ~ATH9K_BEACON_ENA; 1680 beacon_period &= ~ATH9K_BEACON_ENA;
1965 if (beacon_period & ATH9K_BEACON_RESET_TSF) { 1681 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
@@ -1987,7 +1703,6 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
1987 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); 1703 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
1988 1704
1989 REGWRITE_BUFFER_FLUSH(ah); 1705 REGWRITE_BUFFER_FLUSH(ah);
1990 DISABLE_REGWRITE_BUFFER(ah);
1991 1706
1992 REG_RMW_FIELD(ah, AR_RSSI_THR, 1707 REG_RMW_FIELD(ah, AR_RSSI_THR,
1993 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); 1708 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
@@ -2033,7 +1748,6 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
2033 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod)); 1748 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
2034 1749
2035 REGWRITE_BUFFER_FLUSH(ah); 1750 REGWRITE_BUFFER_FLUSH(ah);
2036 DISABLE_REGWRITE_BUFFER(ah);
2037 1751
2038 REG_SET_BIT(ah, AR_TIMER_MODE, 1752 REG_SET_BIT(ah, AR_TIMER_MODE,
2039 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN | 1753 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
@@ -2056,12 +1770,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2056 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 1770 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
2057 1771
2058 u16 capField = 0, eeval; 1772 u16 capField = 0, eeval;
1773 u8 ant_div_ctl1;
2059 1774
2060 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0); 1775 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
2061 regulatory->current_rd = eeval; 1776 regulatory->current_rd = eeval;
2062 1777
2063 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1); 1778 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_1);
2064 if (AR_SREV_9285_10_OR_LATER(ah)) 1779 if (AR_SREV_9285_12_OR_LATER(ah))
2065 eeval |= AR9285_RDEXT_DEFAULT; 1780 eeval |= AR9285_RDEXT_DEFAULT;
2066 regulatory->current_rd_ext = eeval; 1781 regulatory->current_rd_ext = eeval;
2067 1782
@@ -2085,37 +1800,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2085 return -EINVAL; 1800 return -EINVAL;
2086 } 1801 }
2087 1802
2088 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX); 1803 if (eeval & AR5416_OPFLAGS_11A)
1804 pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
2089 1805
2090 if (eeval & AR5416_OPFLAGS_11A) { 1806 if (eeval & AR5416_OPFLAGS_11G)
2091 set_bit(ATH9K_MODE_11A, pCap->wireless_modes); 1807 pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
2092 if (ah->config.ht_enable) {
2093 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
2094 set_bit(ATH9K_MODE_11NA_HT20,
2095 pCap->wireless_modes);
2096 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
2097 set_bit(ATH9K_MODE_11NA_HT40PLUS,
2098 pCap->wireless_modes);
2099 set_bit(ATH9K_MODE_11NA_HT40MINUS,
2100 pCap->wireless_modes);
2101 }
2102 }
2103 }
2104
2105 if (eeval & AR5416_OPFLAGS_11G) {
2106 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
2107 if (ah->config.ht_enable) {
2108 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
2109 set_bit(ATH9K_MODE_11NG_HT20,
2110 pCap->wireless_modes);
2111 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
2112 set_bit(ATH9K_MODE_11NG_HT40PLUS,
2113 pCap->wireless_modes);
2114 set_bit(ATH9K_MODE_11NG_HT40MINUS,
2115 pCap->wireless_modes);
2116 }
2117 }
2118 }
2119 1808
2120 pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK); 1809 pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
2121 /* 1810 /*
@@ -2131,8 +1820,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2131 /* Use rx_chainmask from EEPROM. */ 1820 /* Use rx_chainmask from EEPROM. */
2132 pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK); 1821 pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
2133 1822
2134 if (!(AR_SREV_9280(ah) && (ah->hw_version.macRev == 0))) 1823 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
2135 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
2136 1824
2137 pCap->low_2ghz_chan = 2312; 1825 pCap->low_2ghz_chan = 2312;
2138 pCap->high_2ghz_chan = 2732; 1826 pCap->high_2ghz_chan = 2732;
@@ -2140,24 +1828,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2140 pCap->low_5ghz_chan = 4920; 1828 pCap->low_5ghz_chan = 4920;
2141 pCap->high_5ghz_chan = 6100; 1829 pCap->high_5ghz_chan = 6100;
2142 1830
2143 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP; 1831 common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
2144 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
2145 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
2146
2147 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
2148 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
2149 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
2150 1832
2151 if (ah->config.ht_enable) 1833 if (ah->config.ht_enable)
2152 pCap->hw_caps |= ATH9K_HW_CAP_HT; 1834 pCap->hw_caps |= ATH9K_HW_CAP_HT;
2153 else 1835 else
2154 pCap->hw_caps &= ~ATH9K_HW_CAP_HT; 1836 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
2155 1837
2156 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
2157 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
2158 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
2159 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
2160
2161 if (capField & AR_EEPROM_EEPCAP_MAXQCU) 1838 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
2162 pCap->total_queues = 1839 pCap->total_queues =
2163 MS(capField, AR_EEPROM_EEPCAP_MAXQCU); 1840 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
@@ -2170,8 +1847,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2170 else 1847 else
2171 pCap->keycache_size = AR_KEYTABLE_SIZE; 1848 pCap->keycache_size = AR_KEYTABLE_SIZE;
2172 1849
2173 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
2174
2175 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 1850 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
2176 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1; 1851 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1;
2177 else 1852 else
@@ -2181,9 +1856,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2181 pCap->num_gpio_pins = AR9271_NUM_GPIO; 1856 pCap->num_gpio_pins = AR9271_NUM_GPIO;
2182 else if (AR_DEVID_7010(ah)) 1857 else if (AR_DEVID_7010(ah))
2183 pCap->num_gpio_pins = AR7010_NUM_GPIO; 1858 pCap->num_gpio_pins = AR7010_NUM_GPIO;
2184 else if (AR_SREV_9285_10_OR_LATER(ah)) 1859 else if (AR_SREV_9285_12_OR_LATER(ah))
2185 pCap->num_gpio_pins = AR9285_NUM_GPIO; 1860 pCap->num_gpio_pins = AR9285_NUM_GPIO;
2186 else if (AR_SREV_9280_10_OR_LATER(ah)) 1861 else if (AR_SREV_9280_20_OR_LATER(ah))
2187 pCap->num_gpio_pins = AR928X_NUM_GPIO; 1862 pCap->num_gpio_pins = AR928X_NUM_GPIO;
2188 else 1863 else
2189 pCap->num_gpio_pins = AR_NUM_GPIO; 1864 pCap->num_gpio_pins = AR_NUM_GPIO;
@@ -2240,7 +1915,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2240 pCap->num_antcfg_2ghz = 1915 pCap->num_antcfg_2ghz =
2241 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ); 1916 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
2242 1917
2243 if (AR_SREV_9280_10_OR_LATER(ah) && 1918 if (AR_SREV_9280_20_OR_LATER(ah) &&
2244 ath9k_hw_btcoex_supported(ah)) { 1919 ath9k_hw_btcoex_supported(ah)) {
2245 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO; 1920 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
2246 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO; 1921 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
@@ -2277,9 +1952,17 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2277 if (AR_SREV_9300_20_OR_LATER(ah)) 1952 if (AR_SREV_9300_20_OR_LATER(ah))
2278 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; 1953 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
2279 1954
2280 if (AR_SREV_9287_10_OR_LATER(ah) || AR_SREV_9271(ah)) 1955 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
2281 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; 1956 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
2282 1957
1958 if (AR_SREV_9285(ah))
1959 if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
1960 ant_div_ctl1 =
1961 ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
1962 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
1963 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
1964 }
1965
2283 return 0; 1966 return 0;
2284} 1967}
2285 1968
@@ -2353,11 +2036,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
2353 return MS_REG_READ(AR9300, gpio) != 0; 2036 return MS_REG_READ(AR9300, gpio) != 0;
2354 else if (AR_SREV_9271(ah)) 2037 else if (AR_SREV_9271(ah))
2355 return MS_REG_READ(AR9271, gpio) != 0; 2038 return MS_REG_READ(AR9271, gpio) != 0;
2356 else if (AR_SREV_9287_10_OR_LATER(ah)) 2039 else if (AR_SREV_9287_11_OR_LATER(ah))
2357 return MS_REG_READ(AR9287, gpio) != 0; 2040 return MS_REG_READ(AR9287, gpio) != 0;
2358 else if (AR_SREV_9285_10_OR_LATER(ah)) 2041 else if (AR_SREV_9285_12_OR_LATER(ah))
2359 return MS_REG_READ(AR9285, gpio) != 0; 2042 return MS_REG_READ(AR9285, gpio) != 0;
2360 else if (AR_SREV_9280_10_OR_LATER(ah)) 2043 else if (AR_SREV_9280_20_OR_LATER(ah))
2361 return MS_REG_READ(AR928X, gpio) != 0; 2044 return MS_REG_READ(AR928X, gpio) != 0;
2362 else 2045 else
2363 return MS_REG_READ(AR, gpio) != 0; 2046 return MS_REG_READ(AR, gpio) != 0;
@@ -2456,7 +2139,6 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
2456 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); 2139 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
2457 2140
2458 REGWRITE_BUFFER_FLUSH(ah); 2141 REGWRITE_BUFFER_FLUSH(ah);
2459 DISABLE_REGWRITE_BUFFER(ah);
2460} 2142}
2461EXPORT_SYMBOL(ath9k_hw_setrxfilter); 2143EXPORT_SYMBOL(ath9k_hw_setrxfilter);
2462 2144
@@ -2854,7 +2536,7 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
2854 int used; 2536 int used;
2855 2537
2856 /* chipsets >= AR9280 are single-chip */ 2538 /* chipsets >= AR9280 are single-chip */
2857 if (AR_SREV_9280_10_OR_LATER(ah)) { 2539 if (AR_SREV_9280_20_OR_LATER(ah)) {
2858 used = snprintf(hw_name, len, 2540 used = snprintf(hw_name, len,
2859 "Atheros AR%s Rev:%x", 2541 "Atheros AR%s Rev:%x",
2860 ath9k_hw_mac_bb_name(ah->hw_version.macVersion), 2542 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 1601dd439890..d032939768b0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -61,6 +61,8 @@
61 61
62#define ATH9K_RSSI_BAD -128 62#define ATH9K_RSSI_BAD -128
63 63
64#define ATH9K_NUM_CHANNELS 38
65
64/* Register read/write primitives */ 66/* Register read/write primitives */
65#define REG_WRITE(_ah, _reg, _val) \ 67#define REG_WRITE(_ah, _reg, _val) \
66 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg)) 68 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
@@ -70,19 +72,13 @@
70 72
71#define ENABLE_REGWRITE_BUFFER(_ah) \ 73#define ENABLE_REGWRITE_BUFFER(_ah) \
72 do { \ 74 do { \
73 if (AR_SREV_9271(_ah)) \ 75 if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \
74 ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \ 76 ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
75 } while (0) 77 } while (0)
76 78
77#define DISABLE_REGWRITE_BUFFER(_ah) \
78 do { \
79 if (AR_SREV_9271(_ah)) \
80 ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
81 } while (0)
82
83#define REGWRITE_BUFFER_FLUSH(_ah) \ 79#define REGWRITE_BUFFER_FLUSH(_ah) \
84 do { \ 80 do { \
85 if (AR_SREV_9271(_ah)) \ 81 if (ath9k_hw_common(_ah)->ops->write_flush) \
86 ath9k_hw_common(_ah)->ops->write_flush((_ah)); \ 82 ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
87 } while (0) 83 } while (0)
88 84
@@ -168,47 +164,26 @@ enum ath_ini_subsys {
168 ATH_INI_NUM_SPLIT, 164 ATH_INI_NUM_SPLIT,
169}; 165};
170 166
171enum wireless_mode {
172 ATH9K_MODE_11A = 0,
173 ATH9K_MODE_11G,
174 ATH9K_MODE_11NA_HT20,
175 ATH9K_MODE_11NG_HT20,
176 ATH9K_MODE_11NA_HT40PLUS,
177 ATH9K_MODE_11NA_HT40MINUS,
178 ATH9K_MODE_11NG_HT40PLUS,
179 ATH9K_MODE_11NG_HT40MINUS,
180 ATH9K_MODE_MAX,
181};
182
183enum ath9k_hw_caps { 167enum ath9k_hw_caps {
184 ATH9K_HW_CAP_MIC_AESCCM = BIT(0), 168 ATH9K_HW_CAP_HT = BIT(0),
185 ATH9K_HW_CAP_MIC_CKIP = BIT(1), 169 ATH9K_HW_CAP_RFSILENT = BIT(1),
186 ATH9K_HW_CAP_MIC_TKIP = BIT(2), 170 ATH9K_HW_CAP_CST = BIT(2),
187 ATH9K_HW_CAP_CIPHER_AESCCM = BIT(3), 171 ATH9K_HW_CAP_ENHANCEDPM = BIT(3),
188 ATH9K_HW_CAP_CIPHER_CKIP = BIT(4), 172 ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
189 ATH9K_HW_CAP_CIPHER_TKIP = BIT(5), 173 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
190 ATH9K_HW_CAP_VEOL = BIT(6), 174 ATH9K_HW_CAP_EDMA = BIT(6),
191 ATH9K_HW_CAP_BSSIDMASK = BIT(7), 175 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
192 ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(8), 176 ATH9K_HW_CAP_LDPC = BIT(8),
193 ATH9K_HW_CAP_HT = BIT(9), 177 ATH9K_HW_CAP_FASTCLOCK = BIT(9),
194 ATH9K_HW_CAP_GTT = BIT(10), 178 ATH9K_HW_CAP_SGI_20 = BIT(10),
195 ATH9K_HW_CAP_FASTCC = BIT(11), 179 ATH9K_HW_CAP_PAPRD = BIT(11),
196 ATH9K_HW_CAP_RFSILENT = BIT(12), 180 ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
197 ATH9K_HW_CAP_CST = BIT(13), 181 ATH9K_HW_CAP_2GHZ = BIT(13),
198 ATH9K_HW_CAP_ENHANCEDPM = BIT(14), 182 ATH9K_HW_CAP_5GHZ = BIT(14),
199 ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
200 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
201 ATH9K_HW_CAP_EDMA = BIT(17),
202 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
203 ATH9K_HW_CAP_LDPC = BIT(19),
204 ATH9K_HW_CAP_FASTCLOCK = BIT(20),
205 ATH9K_HW_CAP_SGI_20 = BIT(21),
206 ATH9K_HW_CAP_PAPRD = BIT(22),
207}; 183};
208 184
209struct ath9k_hw_capabilities { 185struct ath9k_hw_capabilities {
210 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */ 186 u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
211 DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */
212 u16 total_queues; 187 u16 total_queues;
213 u16 keycache_size; 188 u16 keycache_size;
214 u16 low_5ghz_chan, high_5ghz_chan; 189 u16 low_5ghz_chan, high_5ghz_chan;
@@ -352,7 +327,6 @@ struct ath9k_hw_cal_data {
352 int32_t CalValid; 327 int32_t CalValid;
353 int8_t iCoff; 328 int8_t iCoff;
354 int8_t qCoff; 329 int8_t qCoff;
355 int16_t rawNoiseFloor;
356 bool paprd_done; 330 bool paprd_done;
357 bool nfcal_pending; 331 bool nfcal_pending;
358 bool nfcal_interference; 332 bool nfcal_interference;
@@ -363,9 +337,11 @@ struct ath9k_hw_cal_data {
363 337
364struct ath9k_channel { 338struct ath9k_channel {
365 struct ieee80211_channel *chan; 339 struct ieee80211_channel *chan;
340 struct ar5416AniState ani;
366 u16 channel; 341 u16 channel;
367 u32 channelFlags; 342 u32 channelFlags;
368 u32 chanmode; 343 u32 chanmode;
344 s16 noisefloor;
369}; 345};
370 346
371#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ 347#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
@@ -495,6 +471,12 @@ struct ath_gen_timer_table {
495 } timer_mask; 471 } timer_mask;
496}; 472};
497 473
474struct ath_hw_antcomb_conf {
475 u8 main_lna_conf;
476 u8 alt_lna_conf;
477 u8 fast_div_bias;
478};
479
498/** 480/**
499 * struct ath_hw_private_ops - callbacks used internally by hardware code 481 * struct ath_hw_private_ops - callbacks used internally by hardware code
500 * 482 *
@@ -518,14 +500,6 @@ struct ath_gen_timer_table {
518 * @setup_calibration: set up calibration 500 * @setup_calibration: set up calibration
519 * @iscal_supported: used to query if a type of calibration is supported 501 * @iscal_supported: used to query if a type of calibration is supported
520 * 502 *
521 * @ani_reset: reset ANI parameters to default values
522 * @ani_lower_immunity: lower the noise immunity level. The level controls
523 * the power-based packet detection on hardware. If a power jump is
524 * detected the adapter takes it as an indication that a packet has
525 * arrived. The level ranges from 0-5. Each level corresponds to a
526 * few dB more of noise immunity. If you have a strong time-varying
527 * interference that is causing false detections (OFDM timing errors or
528 * CCK timing errors) the level can be increased.
529 * @ani_cache_ini_regs: cache the values for ANI from the initial 503 * @ani_cache_ini_regs: cache the values for ANI from the initial
530 * register settings through the register initialization. 504 * register settings through the register initialization.
531 */ 505 */
@@ -539,8 +513,6 @@ struct ath_hw_private_ops {
539 bool (*macversion_supported)(u32 macversion); 513 bool (*macversion_supported)(u32 macversion);
540 void (*setup_calibration)(struct ath_hw *ah, 514 void (*setup_calibration)(struct ath_hw *ah,
541 struct ath9k_cal_list *currCal); 515 struct ath9k_cal_list *currCal);
542 bool (*iscal_supported)(struct ath_hw *ah,
543 enum ath9k_cal_types calType);
544 516
545 /* PHY ops */ 517 /* PHY ops */
546 int (*rf_set_freq)(struct ath_hw *ah, 518 int (*rf_set_freq)(struct ath_hw *ah,
@@ -572,8 +544,6 @@ struct ath_hw_private_ops {
572 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]); 544 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
573 545
574 /* ANI */ 546 /* ANI */
575 void (*ani_reset)(struct ath_hw *ah, bool is_scanning);
576 void (*ani_lower_immunity)(struct ath_hw *ah);
577 void (*ani_cache_ini_regs)(struct ath_hw *ah); 547 void (*ani_cache_ini_regs)(struct ath_hw *ah);
578}; 548};
579 549
@@ -585,11 +555,6 @@ struct ath_hw_private_ops {
585 * 555 *
586 * @config_pci_powersave: 556 * @config_pci_powersave:
587 * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC 557 * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
588 *
589 * @ani_proc_mib_event: process MIB events, this would happen upon specific ANI
590 * thresholds being reached or having overflowed.
591 * @ani_monitor: called periodically by the core driver to collect
592 * MIB stats and adjust ANI if specific thresholds have been reached.
593 */ 558 */
594struct ath_hw_ops { 559struct ath_hw_ops {
595 void (*config_pci_powersave)(struct ath_hw *ah, 560 void (*config_pci_powersave)(struct ath_hw *ah,
@@ -630,9 +595,6 @@ struct ath_hw_ops {
630 u32 burstDuration); 595 u32 burstDuration);
631 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds, 596 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
632 u32 vmf); 597 u32 vmf);
633
634 void (*ani_proc_mib_event)(struct ath_hw *ah);
635 void (*ani_monitor)(struct ath_hw *ah, struct ath9k_channel *chan);
636}; 598};
637 599
638struct ath_nf_limits { 600struct ath_nf_limits {
@@ -647,7 +609,7 @@ struct ath_hw {
647 struct ath9k_hw_version hw_version; 609 struct ath9k_hw_version hw_version;
648 struct ath9k_ops_config config; 610 struct ath9k_ops_config config;
649 struct ath9k_hw_capabilities caps; 611 struct ath9k_hw_capabilities caps;
650 struct ath9k_channel channels[38]; 612 struct ath9k_channel channels[ATH9K_NUM_CHANNELS];
651 struct ath9k_channel *curchan; 613 struct ath9k_channel *curchan;
652 614
653 union { 615 union {
@@ -693,10 +655,9 @@ struct ath_hw {
693 u32 atim_window; 655 u32 atim_window;
694 656
695 /* Calibration */ 657 /* Calibration */
696 enum ath9k_cal_types supp_cals; 658 u32 supp_cals;
697 struct ath9k_cal_list iq_caldata; 659 struct ath9k_cal_list iq_caldata;
698 struct ath9k_cal_list adcgain_caldata; 660 struct ath9k_cal_list adcgain_caldata;
699 struct ath9k_cal_list adcdc_calinitdata;
700 struct ath9k_cal_list adcdc_caldata; 661 struct ath9k_cal_list adcdc_caldata;
701 struct ath9k_cal_list tempCompCalData; 662 struct ath9k_cal_list tempCompCalData;
702 struct ath9k_cal_list *cal_list; 663 struct ath9k_cal_list *cal_list;
@@ -765,8 +726,6 @@ struct ath_hw {
765 /* ANI */ 726 /* ANI */
766 u32 proc_phyerr; 727 u32 proc_phyerr;
767 u32 aniperiod; 728 u32 aniperiod;
768 struct ar5416AniState *curani;
769 struct ar5416AniState ani[255];
770 int totalSizeDesired[5]; 729 int totalSizeDesired[5];
771 int coarse_high[5]; 730 int coarse_high[5];
772 int coarse_low[5]; 731 int coarse_low[5];
@@ -874,12 +833,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
874int ath9k_hw_fill_cap_info(struct ath_hw *ah); 833int ath9k_hw_fill_cap_info(struct ath_hw *ah);
875u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan); 834u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
876 835
877/* Key Cache Management */
878bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
879bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
880 const struct ath9k_keyval *k,
881 const u8 *mac);
882
883/* GPIO / RFKILL / Antennae */ 836/* GPIO / RFKILL / Antennae */
884void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio); 837void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
885u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio); 838u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
@@ -888,6 +841,10 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
888void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); 841void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
889u32 ath9k_hw_getdefantenna(struct ath_hw *ah); 842u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
890void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 843void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
844void ath9k_hw_antdiv_comb_conf_get(struct ath_hw *ah,
845 struct ath_hw_antcomb_conf *antconf);
846void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
847 struct ath_hw_antcomb_conf *antconf);
891 848
892/* General Operation */ 849/* General Operation */
893bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 850bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
@@ -985,6 +942,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
985void ar9002_hw_attach_ops(struct ath_hw *ah); 942void ar9002_hw_attach_ops(struct ath_hw *ah);
986void ar9003_hw_attach_ops(struct ath_hw *ah); 943void ar9003_hw_attach_ops(struct ath_hw *ah);
987 944
945void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
988/* 946/*
989 * ANI work can be shared between all families but a next 947 * ANI work can be shared between all families but a next
990 * generation implementation of ANI will be used only for AR9003 only 948 * generation implementation of ANI will be used only for AR9003 only
@@ -993,8 +951,9 @@ void ar9003_hw_attach_ops(struct ath_hw *ah);
993 * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani. 951 * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
994 */ 952 */
995extern int modparam_force_new_ani; 953extern int modparam_force_new_ani;
996void ath9k_hw_attach_ani_ops_old(struct ath_hw *ah); 954void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
997void ath9k_hw_attach_ani_ops_new(struct ath_hw *ah); 955void ath9k_hw_proc_mib_event(struct ath_hw *ah);
956void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
998 957
999#define ATH_PCIE_CAP_LINK_CTRL 0x70 958#define ATH_PCIE_CAP_LINK_CTRL 0x70
1000#define ATH_PCIE_CAP_LINK_L0S 1 959#define ATH_PCIE_CAP_LINK_L0S 1
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3dbff8d07766..bc6c4df9712c 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(blink, "Enable LED blink on activity");
56 * on 5 MHz steps, we support the channels which we know 56 * on 5 MHz steps, we support the channels which we know
57 * we have calibration data for all cards though to make 57 * we have calibration data for all cards though to make
58 * this static */ 58 * this static */
59static struct ieee80211_channel ath9k_2ghz_chantable[] = { 59static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
60 CHAN2G(2412, 0), /* Channel 1 */ 60 CHAN2G(2412, 0), /* Channel 1 */
61 CHAN2G(2417, 1), /* Channel 2 */ 61 CHAN2G(2417, 1), /* Channel 2 */
62 CHAN2G(2422, 2), /* Channel 3 */ 62 CHAN2G(2422, 2), /* Channel 3 */
@@ -77,7 +77,7 @@ static struct ieee80211_channel ath9k_2ghz_chantable[] = {
77 * on 5 MHz steps, we support the channels which we know 77 * on 5 MHz steps, we support the channels which we know
78 * we have calibration data for all cards though to make 78 * we have calibration data for all cards though to make
79 * this static */ 79 * this static */
80static struct ieee80211_channel ath9k_5ghz_chantable[] = { 80static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
81 /* _We_ call this UNII 1 */ 81 /* _We_ call this UNII 1 */
82 CHAN5G(5180, 14), /* Channel 36 */ 82 CHAN5G(5180, 14), /* Channel 36 */
83 CHAN5G(5200, 15), /* Channel 40 */ 83 CHAN5G(5200, 15), /* Channel 40 */
@@ -211,7 +211,7 @@ static void setup_ht_cap(struct ath_softc *sc,
211 else 211 else
212 max_streams = 2; 212 max_streams = 2;
213 213
214 if (AR_SREV_9280_10_OR_LATER(ah)) { 214 if (AR_SREV_9280_20_OR_LATER(ah)) {
215 if (max_streams >= 2) 215 if (max_streams >= 2)
216 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; 216 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
217 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); 217 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
@@ -381,7 +381,7 @@ static void ath9k_init_crypto(struct ath_softc *sc)
381 * reset the contents on initial power up. 381 * reset the contents on initial power up.
382 */ 382 */
383 for (i = 0; i < common->keymax; i++) 383 for (i = 0; i < common->keymax; i++)
384 ath9k_hw_keyreset(sc->sc_ah, (u16) i); 384 ath_hw_keyreset(common, (u16) i);
385 385
386 /* 386 /*
387 * Check whether the separate key cache entries 387 * Check whether the separate key cache entries
@@ -389,8 +389,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
389 * With split mic keys the number of stations is limited 389 * With split mic keys the number of stations is limited
390 * to 27 otherwise 59. 390 * to 27 otherwise 59.
391 */ 391 */
392 if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)) 392 if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
393 common->splitmic = 1; 393 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
394} 394}
395 395
396static int ath9k_init_btcoex(struct ath_softc *sc) 396static int ath9k_init_btcoex(struct ath_softc *sc)
@@ -477,10 +477,21 @@ err:
477 return -EIO; 477 return -EIO;
478} 478}
479 479
480static void ath9k_init_channels_rates(struct ath_softc *sc) 480static int ath9k_init_channels_rates(struct ath_softc *sc)
481{ 481{
482 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) { 482 void *channels;
483 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable; 483
484 BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
485 ARRAY_SIZE(ath9k_5ghz_chantable) !=
486 ATH9K_NUM_CHANNELS);
487
488 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
489 channels = kmemdup(ath9k_2ghz_chantable,
490 sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
491 if (!channels)
492 return -ENOMEM;
493
494 sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
484 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; 495 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
485 sc->sbands[IEEE80211_BAND_2GHZ].n_channels = 496 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
486 ARRAY_SIZE(ath9k_2ghz_chantable); 497 ARRAY_SIZE(ath9k_2ghz_chantable);
@@ -489,8 +500,16 @@ static void ath9k_init_channels_rates(struct ath_softc *sc)
489 ARRAY_SIZE(ath9k_legacy_rates); 500 ARRAY_SIZE(ath9k_legacy_rates);
490 } 501 }
491 502
492 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) { 503 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
493 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable; 504 channels = kmemdup(ath9k_5ghz_chantable,
505 sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
506 if (!channels) {
507 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
508 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
509 return -ENOMEM;
510 }
511
512 sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
494 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; 513 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
495 sc->sbands[IEEE80211_BAND_5GHZ].n_channels = 514 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
496 ARRAY_SIZE(ath9k_5ghz_chantable); 515 ARRAY_SIZE(ath9k_5ghz_chantable);
@@ -499,6 +518,7 @@ static void ath9k_init_channels_rates(struct ath_softc *sc)
499 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates = 518 sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
500 ARRAY_SIZE(ath9k_legacy_rates) - 4; 519 ARRAY_SIZE(ath9k_legacy_rates) - 4;
501 } 520 }
521 return 0;
502} 522}
503 523
504static void ath9k_init_misc(struct ath_softc *sc) 524static void ath9k_init_misc(struct ath_softc *sc)
@@ -506,7 +526,6 @@ static void ath9k_init_misc(struct ath_softc *sc)
506 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 526 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
507 int i = 0; 527 int i = 0;
508 528
509 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
510 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc); 529 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
511 530
512 sc->config.txpowlimit = ATH_TXPOWER_MAX; 531 sc->config.txpowlimit = ATH_TXPOWER_MAX;
@@ -522,8 +541,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
522 ath9k_hw_set_diversity(sc->sc_ah, true); 541 ath9k_hw_set_diversity(sc->sc_ah, true);
523 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah); 542 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
524 543
525 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 544 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
526 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
527 545
528 sc->beacon.slottime = ATH9K_SLOT_TIME_9; 546 sc->beacon.slottime = ATH9K_SLOT_TIME_9;
529 547
@@ -531,6 +549,9 @@ static void ath9k_init_misc(struct ath_softc *sc)
531 sc->beacon.bslot[i] = NULL; 549 sc->beacon.bslot[i] = NULL;
532 sc->beacon.bslot_aphy[i] = NULL; 550 sc->beacon.bslot_aphy[i] = NULL;
533 } 551 }
552
553 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
554 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
534} 555}
535 556
536static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, 557static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
@@ -593,8 +614,11 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
593 if (ret) 614 if (ret)
594 goto err_btcoex; 615 goto err_btcoex;
595 616
617 ret = ath9k_init_channels_rates(sc);
618 if (ret)
619 goto err_btcoex;
620
596 ath9k_init_crypto(sc); 621 ath9k_init_crypto(sc);
597 ath9k_init_channels_rates(sc);
598 ath9k_init_misc(sc); 622 ath9k_init_misc(sc);
599 623
600 return 0; 624 return 0;
@@ -637,11 +661,13 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
637 661
638 hw->wiphy->interface_modes = 662 hw->wiphy->interface_modes =
639 BIT(NL80211_IFTYPE_AP) | 663 BIT(NL80211_IFTYPE_AP) |
664 BIT(NL80211_IFTYPE_WDS) |
640 BIT(NL80211_IFTYPE_STATION) | 665 BIT(NL80211_IFTYPE_STATION) |
641 BIT(NL80211_IFTYPE_ADHOC) | 666 BIT(NL80211_IFTYPE_ADHOC) |
642 BIT(NL80211_IFTYPE_MESH_POINT); 667 BIT(NL80211_IFTYPE_MESH_POINT);
643 668
644 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 669 if (AR_SREV_5416(sc->sc_ah))
670 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
645 671
646 hw->queues = 4; 672 hw->queues = 4;
647 hw->max_rates = 4; 673 hw->max_rates = 4;
@@ -651,19 +677,21 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
651 hw->sta_data_size = sizeof(struct ath_node); 677 hw->sta_data_size = sizeof(struct ath_node);
652 hw->vif_data_size = sizeof(struct ath_vif); 678 hw->vif_data_size = sizeof(struct ath_vif);
653 679
680#ifdef CONFIG_ATH9K_RATE_CONTROL
654 hw->rate_control_algorithm = "ath9k_rate_control"; 681 hw->rate_control_algorithm = "ath9k_rate_control";
682#endif
655 683
656 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) 684 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
657 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 685 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
658 &sc->sbands[IEEE80211_BAND_2GHZ]; 686 &sc->sbands[IEEE80211_BAND_2GHZ];
659 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) 687 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
660 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 688 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
661 &sc->sbands[IEEE80211_BAND_5GHZ]; 689 &sc->sbands[IEEE80211_BAND_5GHZ];
662 690
663 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 691 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
664 if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) 692 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
665 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap); 693 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
666 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) 694 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
667 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 695 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
668 } 696 }
669 697
@@ -751,6 +779,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
751{ 779{
752 int i = 0; 780 int i = 0;
753 781
782 if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
783 kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
784
785 if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
786 kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
787
754 if ((sc->btcoex.no_stomp_timer) && 788 if ((sc->btcoex.no_stomp_timer) &&
755 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) 789 sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
756 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); 790 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 0b7d1253f0c0..8c13479b17cd 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -40,7 +40,6 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
40 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 40 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
41 41
42 REGWRITE_BUFFER_FLUSH(ah); 42 REGWRITE_BUFFER_FLUSH(ah);
43 DISABLE_REGWRITE_BUFFER(ah);
44} 43}
45 44
46u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) 45u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -492,8 +491,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
492 REG_WRITE(ah, AR_DMISC(q), 491 REG_WRITE(ah, AR_DMISC(q),
493 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); 492 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
494 493
495 REGWRITE_BUFFER_FLUSH(ah);
496
497 if (qi->tqi_cbrPeriod) { 494 if (qi->tqi_cbrPeriod) {
498 REG_WRITE(ah, AR_QCBRCFG(q), 495 REG_WRITE(ah, AR_QCBRCFG(q),
499 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | 496 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -509,8 +506,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
509 AR_Q_RDYTIMECFG_EN); 506 AR_Q_RDYTIMECFG_EN);
510 } 507 }
511 508
512 REGWRITE_BUFFER_FLUSH(ah);
513
514 REG_WRITE(ah, AR_DCHNTIME(q), 509 REG_WRITE(ah, AR_DCHNTIME(q),
515 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | 510 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
516 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); 511 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -530,7 +525,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
530 } 525 }
531 526
532 REGWRITE_BUFFER_FLUSH(ah); 527 REGWRITE_BUFFER_FLUSH(ah);
533 DISABLE_REGWRITE_BUFFER(ah);
534 528
535 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { 529 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
536 REG_WRITE(ah, AR_DMISC(q), 530 REG_WRITE(ah, AR_DMISC(q),
@@ -553,7 +547,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
553 | AR_D_MISC_POST_FR_BKOFF_DIS); 547 | AR_D_MISC_POST_FR_BKOFF_DIS);
554 548
555 REGWRITE_BUFFER_FLUSH(ah); 549 REGWRITE_BUFFER_FLUSH(ah);
556 DISABLE_REGWRITE_BUFFER(ah);
557 550
558 /* 551 /*
559 * cwmin and cwmax should be 0 for beacon queue 552 * cwmin and cwmax should be 0 for beacon queue
@@ -585,7 +578,6 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
585 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); 578 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
586 579
587 REGWRITE_BUFFER_FLUSH(ah); 580 REGWRITE_BUFFER_FLUSH(ah);
588 DISABLE_REGWRITE_BUFFER(ah);
589 581
590 break; 582 break;
591 case ATH9K_TX_QUEUE_PSPOLL: 583 case ATH9K_TX_QUEUE_PSPOLL:
@@ -714,6 +706,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
714 else if ((ads.ds_rxstatus8 & AR_MichaelErr) && 706 else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
715 rs->rs_keyix != ATH9K_RXKEYIX_INVALID) 707 rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
716 rs->rs_status |= ATH9K_RXERR_MIC; 708 rs->rs_status |= ATH9K_RXERR_MIC;
709 else if (ads.ds_rxstatus8 & AR_KeyMiss)
710 rs->rs_status |= ATH9K_RXERR_DECRYPT;
717 } 711 }
718 712
719 return 0; 713 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 2633896d3998..7c1a34d64f6d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -660,17 +660,6 @@ struct ath9k_11n_rate_series {
660 u32 RateFlags; 660 u32 RateFlags;
661}; 661};
662 662
663struct ath9k_keyval {
664 u8 kv_type;
665 u8 kv_pad;
666 u16 kv_len;
667 u8 kv_val[16]; /* TK */
668 u8 kv_mic[8]; /* Michael MIC key */
669 u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
670 * supports both MIC keys in the same key cache entry;
671 * in that case, kv_mic is the RX key) */
672};
673
674enum ath9k_key_type { 663enum ath9k_key_type {
675 ATH9K_KEY_TYPE_CLEAR, 664 ATH9K_KEY_TYPE_CLEAR,
676 ATH9K_KEY_TYPE_WEP, 665 ATH9K_KEY_TYPE_WEP,
@@ -678,16 +667,6 @@ enum ath9k_key_type {
678 ATH9K_KEY_TYPE_TKIP, 667 ATH9K_KEY_TYPE_TKIP,
679}; 668};
680 669
681enum ath9k_cipher {
682 ATH9K_CIPHER_WEP = 0,
683 ATH9K_CIPHER_AES_OCB = 1,
684 ATH9K_CIPHER_AES_CCM = 2,
685 ATH9K_CIPHER_CKIP = 3,
686 ATH9K_CIPHER_TKIP = 4,
687 ATH9K_CIPHER_CLR = 5,
688 ATH9K_CIPHER_MIC = 127
689};
690
691struct ath_hw; 670struct ath_hw;
692struct ath9k_channel; 671struct ath9k_channel;
693 672
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1165f909ef04..3ff0e476c2b3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,36 +18,6 @@
18#include "ath9k.h" 18#include "ath9k.h"
19#include "btcoex.h" 19#include "btcoex.h"
20 20
21static void ath_cache_conf_rate(struct ath_softc *sc,
22 struct ieee80211_conf *conf)
23{
24 switch (conf->channel->band) {
25 case IEEE80211_BAND_2GHZ:
26 if (conf_is_ht20(conf))
27 sc->cur_rate_mode = ATH9K_MODE_11NG_HT20;
28 else if (conf_is_ht40_minus(conf))
29 sc->cur_rate_mode = ATH9K_MODE_11NG_HT40MINUS;
30 else if (conf_is_ht40_plus(conf))
31 sc->cur_rate_mode = ATH9K_MODE_11NG_HT40PLUS;
32 else
33 sc->cur_rate_mode = ATH9K_MODE_11G;
34 break;
35 case IEEE80211_BAND_5GHZ:
36 if (conf_is_ht20(conf))
37 sc->cur_rate_mode = ATH9K_MODE_11NA_HT20;
38 else if (conf_is_ht40_minus(conf))
39 sc->cur_rate_mode = ATH9K_MODE_11NA_HT40MINUS;
40 else if (conf_is_ht40_plus(conf))
41 sc->cur_rate_mode = ATH9K_MODE_11NA_HT40PLUS;
42 else
43 sc->cur_rate_mode = ATH9K_MODE_11A;
44 break;
45 default:
46 BUG_ON(1);
47 break;
48 }
49}
50
51static void ath_update_txpow(struct ath_softc *sc) 21static void ath_update_txpow(struct ath_softc *sc)
52{ 22{
53 struct ath_hw *ah = sc->sc_ah; 23 struct ath_hw *ah = sc->sc_ah;
@@ -121,6 +91,7 @@ bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
121 91
122void ath9k_ps_wakeup(struct ath_softc *sc) 92void ath9k_ps_wakeup(struct ath_softc *sc)
123{ 93{
94 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
124 unsigned long flags; 95 unsigned long flags;
125 96
126 spin_lock_irqsave(&sc->sc_pm_lock, flags); 97 spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -129,18 +100,33 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
129 100
130 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 101 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
131 102
103 /*
104 * While the hardware is asleep, the cycle counters contain no
105 * useful data. Better clear them now so that they don't mess up
106 * survey data results.
107 */
108 spin_lock(&common->cc_lock);
109 ath_hw_cycle_counters_update(common);
110 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
111 spin_unlock(&common->cc_lock);
112
132 unlock: 113 unlock:
133 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 114 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
134} 115}
135 116
136void ath9k_ps_restore(struct ath_softc *sc) 117void ath9k_ps_restore(struct ath_softc *sc)
137{ 118{
119 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
138 unsigned long flags; 120 unsigned long flags;
139 121
140 spin_lock_irqsave(&sc->sc_pm_lock, flags); 122 spin_lock_irqsave(&sc->sc_pm_lock, flags);
141 if (--sc->ps_usecount != 0) 123 if (--sc->ps_usecount != 0)
142 goto unlock; 124 goto unlock;
143 125
126 spin_lock(&common->cc_lock);
127 ath_hw_cycle_counters_update(common);
128 spin_unlock(&common->cc_lock);
129
144 if (sc->ps_idle) 130 if (sc->ps_idle)
145 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 131 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
146 else if (sc->ps_enabled && 132 else if (sc->ps_enabled &&
@@ -175,6 +161,45 @@ static void ath_start_ani(struct ath_common *common)
175 msecs_to_jiffies((u32)ah->config.ani_poll_interval)); 161 msecs_to_jiffies((u32)ah->config.ani_poll_interval));
176} 162}
177 163
164static void ath_update_survey_nf(struct ath_softc *sc, int channel)
165{
166 struct ath_hw *ah = sc->sc_ah;
167 struct ath9k_channel *chan = &ah->channels[channel];
168 struct survey_info *survey = &sc->survey[channel];
169
170 if (chan->noisefloor) {
171 survey->filled |= SURVEY_INFO_NOISE_DBM;
172 survey->noise = chan->noisefloor;
173 }
174}
175
176static void ath_update_survey_stats(struct ath_softc *sc)
177{
178 struct ath_hw *ah = sc->sc_ah;
179 struct ath_common *common = ath9k_hw_common(ah);
180 int pos = ah->curchan - &ah->channels[0];
181 struct survey_info *survey = &sc->survey[pos];
182 struct ath_cycle_counters *cc = &common->cc_survey;
183 unsigned int div = common->clockrate * 1000;
184
185 if (ah->power_mode == ATH9K_PM_AWAKE)
186 ath_hw_cycle_counters_update(common);
187
188 if (cc->cycles > 0) {
189 survey->filled |= SURVEY_INFO_CHANNEL_TIME |
190 SURVEY_INFO_CHANNEL_TIME_BUSY |
191 SURVEY_INFO_CHANNEL_TIME_RX |
192 SURVEY_INFO_CHANNEL_TIME_TX;
193 survey->channel_time += cc->cycles / div;
194 survey->channel_time_busy += cc->rx_busy / div;
195 survey->channel_time_rx += cc->rx_frame / div;
196 survey->channel_time_tx += cc->tx_frame / div;
197 }
198 memset(cc, 0, sizeof(*cc));
199
200 ath_update_survey_nf(sc, pos);
201}
202
178/* 203/*
179 * Set/change channels. If the channel is really being changed, it's done 204 * Set/change channels. If the channel is really being changed, it's done
180 * by reseting the chip. To accomplish this we must first cleanup any pending 205 * by reseting the chip. To accomplish this we must first cleanup any pending
@@ -251,14 +276,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
251 goto ps_restore; 276 goto ps_restore;
252 } 277 }
253 278
254 ath_cache_conf_rate(sc, &hw->conf);
255 ath_update_txpow(sc); 279 ath_update_txpow(sc);
256 ath9k_hw_set_interrupts(ah, ah->imask); 280 ath9k_hw_set_interrupts(ah, ah->imask);
257 281
258 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL | SC_OP_SCANNING))) { 282 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
259 ath_start_ani(common);
260 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
261 ath_beacon_config(sc, NULL); 283 ath_beacon_config(sc, NULL);
284 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
285 ath_start_ani(common);
262 } 286 }
263 287
264 ps_restore: 288 ps_restore:
@@ -270,6 +294,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
270{ 294{
271 struct ath_hw *ah = sc->sc_ah; 295 struct ath_hw *ah = sc->sc_ah;
272 struct ath9k_hw_cal_data *caldata = ah->caldata; 296 struct ath9k_hw_cal_data *caldata = ah->caldata;
297 struct ath_common *common = ath9k_hw_common(ah);
273 int chain; 298 int chain;
274 299
275 if (!caldata || !caldata->paprd_done) 300 if (!caldata || !caldata->paprd_done)
@@ -278,7 +303,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
278 ath9k_ps_wakeup(sc); 303 ath9k_ps_wakeup(sc);
279 ar9003_paprd_enable(ah, false); 304 ar9003_paprd_enable(ah, false);
280 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 305 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
281 if (!(ah->caps.tx_chainmask & BIT(chain))) 306 if (!(common->tx_chainmask & BIT(chain)))
282 continue; 307 continue;
283 308
284 ar9003_paprd_populate_single_table(ah, caldata, chain); 309 ar9003_paprd_populate_single_table(ah, caldata, chain);
@@ -300,6 +325,7 @@ void ath_paprd_calibrate(struct work_struct *work)
300 struct ieee80211_supported_band *sband = &sc->sbands[band]; 325 struct ieee80211_supported_band *sband = &sc->sbands[band];
301 struct ath_tx_control txctl; 326 struct ath_tx_control txctl;
302 struct ath9k_hw_cal_data *caldata = ah->caldata; 327 struct ath9k_hw_cal_data *caldata = ah->caldata;
328 struct ath_common *common = ath9k_hw_common(ah);
303 int qnum, ftype; 329 int qnum, ftype;
304 int chain_ok = 0; 330 int chain_ok = 0;
305 int chain; 331 int chain;
@@ -333,7 +359,7 @@ void ath_paprd_calibrate(struct work_struct *work)
333 ath9k_ps_wakeup(sc); 359 ath9k_ps_wakeup(sc);
334 ar9003_paprd_init_table(ah); 360 ar9003_paprd_init_table(ah);
335 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 361 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
336 if (!(ah->caps.tx_chainmask & BIT(chain))) 362 if (!(common->tx_chainmask & BIT(chain)))
337 continue; 363 continue;
338 364
339 chain_ok = 0; 365 chain_ok = 0;
@@ -397,6 +423,7 @@ void ath_ani_calibrate(unsigned long data)
397 bool aniflag = false; 423 bool aniflag = false;
398 unsigned int timestamp = jiffies_to_msecs(jiffies); 424 unsigned int timestamp = jiffies_to_msecs(jiffies);
399 u32 cal_interval, short_cal_interval, long_cal_interval; 425 u32 cal_interval, short_cal_interval, long_cal_interval;
426 unsigned long flags;
400 427
401 if (ah->caldata && ah->caldata->nfcal_interference) 428 if (ah->caldata && ah->caldata->nfcal_interference)
402 long_cal_interval = ATH_LONG_CALINTERVAL_INT; 429 long_cal_interval = ATH_LONG_CALINTERVAL_INT;
@@ -447,8 +474,12 @@ void ath_ani_calibrate(unsigned long data)
447 /* Skip all processing if there's nothing to do. */ 474 /* Skip all processing if there's nothing to do. */
448 if (longcal || shortcal || aniflag) { 475 if (longcal || shortcal || aniflag) {
449 /* Call ANI routine if necessary */ 476 /* Call ANI routine if necessary */
450 if (aniflag) 477 if (aniflag) {
478 spin_lock_irqsave(&common->cc_lock, flags);
451 ath9k_hw_ani_monitor(ah, ah->curchan); 479 ath9k_hw_ani_monitor(ah, ah->curchan);
480 ath_update_survey_stats(sc);
481 spin_unlock_irqrestore(&common->cc_lock, flags);
482 }
452 483
453 /* Perform calibration if necessary */ 484 /* Perform calibration if necessary */
454 if (longcal || shortcal) { 485 if (longcal || shortcal) {
@@ -457,16 +488,6 @@ void ath_ani_calibrate(unsigned long data)
457 ah->curchan, 488 ah->curchan,
458 common->rx_chainmask, 489 common->rx_chainmask,
459 longcal); 490 longcal);
460
461 if (longcal)
462 common->ani.noise_floor = ath9k_hw_getchan_noise(ah,
463 ah->curchan);
464
465 ath_print(common, ATH_DBG_ANI,
466 " calibrate chan %u/%x nf: %d\n",
467 ah->curchan->channel,
468 ah->curchan->channelFlags,
469 common->ani.noise_floor);
470 } 491 }
471 } 492 }
472 493
@@ -643,6 +664,7 @@ irqreturn_t ath_isr(int irq, void *dev)
643 664
644 struct ath_softc *sc = dev; 665 struct ath_softc *sc = dev;
645 struct ath_hw *ah = sc->sc_ah; 666 struct ath_hw *ah = sc->sc_ah;
667 struct ath_common *common = ath9k_hw_common(ah);
646 enum ath9k_int status; 668 enum ath9k_int status;
647 bool sched = false; 669 bool sched = false;
648 670
@@ -692,7 +714,12 @@ irqreturn_t ath_isr(int irq, void *dev)
692 714
693 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 715 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
694 (status & ATH9K_INT_BB_WATCHDOG)) { 716 (status & ATH9K_INT_BB_WATCHDOG)) {
717
718 spin_lock(&common->cc_lock);
719 ath_hw_cycle_counters_update(common);
695 ar9003_hw_bb_watchdog_dbg_info(ah); 720 ar9003_hw_bb_watchdog_dbg_info(ah);
721 spin_unlock(&common->cc_lock);
722
696 goto chip_reset; 723 goto chip_reset;
697 } 724 }
698 725
@@ -721,7 +748,9 @@ irqreturn_t ath_isr(int irq, void *dev)
721 * it will clear whatever condition caused 748 * it will clear whatever condition caused
722 * the interrupt. 749 * the interrupt.
723 */ 750 */
724 ath9k_hw_procmibevent(ah); 751 spin_lock(&common->cc_lock);
752 ath9k_hw_proc_mib_event(ah);
753 spin_unlock(&common->cc_lock);
725 ath9k_hw_set_interrupts(ah, ah->imask); 754 ath9k_hw_set_interrupts(ah, ah->imask);
726 } 755 }
727 756
@@ -953,11 +982,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
953 * that changes the channel so update any state that 982 * that changes the channel so update any state that
954 * might change as a result. 983 * might change as a result.
955 */ 984 */
956 ath_cache_conf_rate(sc, &hw->conf);
957
958 ath_update_txpow(sc); 985 ath_update_txpow(sc);
959 986
960 if (sc->sc_flags & SC_OP_BEACONS) 987 if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
961 ath_beacon_config(sc, NULL); /* restart beacons */ 988 ath_beacon_config(sc, NULL); /* restart beacons */
962 989
963 ath9k_hw_set_interrupts(ah, ah->imask); 990 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -1156,14 +1183,11 @@ static int ath9k_start(struct ieee80211_hw *hw)
1156 else 1183 else
1157 ah->imask |= ATH9K_INT_RX; 1184 ah->imask |= ATH9K_INT_RX;
1158 1185
1159 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 1186 ah->imask |= ATH9K_INT_GTT;
1160 ah->imask |= ATH9K_INT_GTT;
1161 1187
1162 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 1188 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1163 ah->imask |= ATH9K_INT_CST; 1189 ah->imask |= ATH9K_INT_CST;
1164 1190
1165 ath_cache_conf_rate(sc, &hw->conf);
1166
1167 sc->sc_flags &= ~SC_OP_INVALID; 1191 sc->sc_flags &= ~SC_OP_INVALID;
1168 1192
1169 /* Disable BMISS interrupt when we're not associated */ 1193 /* Disable BMISS interrupt when we're not associated */
@@ -1379,16 +1403,13 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1379 1403
1380 mutex_lock(&sc->mutex); 1404 mutex_lock(&sc->mutex);
1381 1405
1382 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
1383 sc->nvifs > 0) {
1384 ret = -ENOBUFS;
1385 goto out;
1386 }
1387
1388 switch (vif->type) { 1406 switch (vif->type) {
1389 case NL80211_IFTYPE_STATION: 1407 case NL80211_IFTYPE_STATION:
1390 ic_opmode = NL80211_IFTYPE_STATION; 1408 ic_opmode = NL80211_IFTYPE_STATION;
1391 break; 1409 break;
1410 case NL80211_IFTYPE_WDS:
1411 ic_opmode = NL80211_IFTYPE_WDS;
1412 break;
1392 case NL80211_IFTYPE_ADHOC: 1413 case NL80211_IFTYPE_ADHOC:
1393 case NL80211_IFTYPE_AP: 1414 case NL80211_IFTYPE_AP:
1394 case NL80211_IFTYPE_MESH_POINT: 1415 case NL80211_IFTYPE_MESH_POINT:
@@ -1414,8 +1435,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1414 1435
1415 sc->nvifs++; 1436 sc->nvifs++;
1416 1437
1417 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1438 ath9k_set_bssid_mask(hw, vif);
1418 ath9k_set_bssid_mask(hw);
1419 1439
1420 if (sc->nvifs > 1) 1440 if (sc->nvifs > 1)
1421 goto out; /* skip global settings for secondary vif */ 1441 goto out; /* skip global settings for secondary vif */
@@ -1497,7 +1517,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1497 mutex_unlock(&sc->mutex); 1517 mutex_unlock(&sc->mutex);
1498} 1518}
1499 1519
1500void ath9k_enable_ps(struct ath_softc *sc) 1520static void ath9k_enable_ps(struct ath_softc *sc)
1501{ 1521{
1502 struct ath_hw *ah = sc->sc_ah; 1522 struct ath_hw *ah = sc->sc_ah;
1503 1523
@@ -1511,13 +1531,33 @@ void ath9k_enable_ps(struct ath_softc *sc)
1511 } 1531 }
1512} 1532}
1513 1533
1534static void ath9k_disable_ps(struct ath_softc *sc)
1535{
1536 struct ath_hw *ah = sc->sc_ah;
1537
1538 sc->ps_enabled = false;
1539 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
1540 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1541 ath9k_hw_setrxabort(ah, 0);
1542 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
1543 PS_WAIT_FOR_CAB |
1544 PS_WAIT_FOR_PSPOLL_DATA |
1545 PS_WAIT_FOR_TX_ACK);
1546 if (ah->imask & ATH9K_INT_TIM_TIMER) {
1547 ah->imask &= ~ATH9K_INT_TIM_TIMER;
1548 ath9k_hw_set_interrupts(ah, ah->imask);
1549 }
1550 }
1551
1552}
1553
1514static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1554static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1515{ 1555{
1516 struct ath_wiphy *aphy = hw->priv; 1556 struct ath_wiphy *aphy = hw->priv;
1517 struct ath_softc *sc = aphy->sc; 1557 struct ath_softc *sc = aphy->sc;
1518 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1519 struct ieee80211_conf *conf = &hw->conf;
1520 struct ath_hw *ah = sc->sc_ah; 1558 struct ath_hw *ah = sc->sc_ah;
1559 struct ath_common *common = ath9k_hw_common(ah);
1560 struct ieee80211_conf *conf = &hw->conf;
1521 bool disable_radio; 1561 bool disable_radio;
1522 1562
1523 mutex_lock(&sc->mutex); 1563 mutex_lock(&sc->mutex);
@@ -1562,35 +1602,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1562 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode. 1602 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
1563 */ 1603 */
1564 if (changed & IEEE80211_CONF_CHANGE_PS) { 1604 if (changed & IEEE80211_CONF_CHANGE_PS) {
1565 if (conf->flags & IEEE80211_CONF_PS) { 1605 unsigned long flags;
1566 sc->ps_flags |= PS_ENABLED; 1606 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1567 /* 1607 if (conf->flags & IEEE80211_CONF_PS)
1568 * At this point we know hardware has received an ACK 1608 ath9k_enable_ps(sc);
1569 * of a previously sent null data frame. 1609 else
1570 */ 1610 ath9k_disable_ps(sc);
1571 if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) { 1611 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1572 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1573 ath9k_enable_ps(sc);
1574 }
1575 } else {
1576 sc->ps_enabled = false;
1577 sc->ps_flags &= ~(PS_ENABLED |
1578 PS_NULLFUNC_COMPLETED);
1579 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1580 if (!(ah->caps.hw_caps &
1581 ATH9K_HW_CAP_AUTOSLEEP)) {
1582 ath9k_hw_setrxabort(sc->sc_ah, 0);
1583 sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
1584 PS_WAIT_FOR_CAB |
1585 PS_WAIT_FOR_PSPOLL_DATA |
1586 PS_WAIT_FOR_TX_ACK);
1587 if (ah->imask & ATH9K_INT_TIM_TIMER) {
1588 ah->imask &= ~ATH9K_INT_TIM_TIMER;
1589 ath9k_hw_set_interrupts(sc->sc_ah,
1590 ah->imask);
1591 }
1592 }
1593 }
1594 } 1612 }
1595 1613
1596 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1614 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
@@ -1604,6 +1622,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1604 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 1622 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1605 struct ieee80211_channel *curchan = hw->conf.channel; 1623 struct ieee80211_channel *curchan = hw->conf.channel;
1606 int pos = curchan->hw_value; 1624 int pos = curchan->hw_value;
1625 int old_pos = -1;
1626 unsigned long flags;
1627
1628 if (ah->curchan)
1629 old_pos = ah->curchan - &ah->channels[0];
1607 1630
1608 aphy->chan_idx = pos; 1631 aphy->chan_idx = pos;
1609 aphy->chan_is_ht = conf_is_ht(conf); 1632 aphy->chan_is_ht = conf_is_ht(conf);
@@ -1631,12 +1654,45 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1631 1654
1632 ath_update_chainmask(sc, conf_is_ht(conf)); 1655 ath_update_chainmask(sc, conf_is_ht(conf));
1633 1656
1657 /* update survey stats for the old channel before switching */
1658 spin_lock_irqsave(&common->cc_lock, flags);
1659 ath_update_survey_stats(sc);
1660 spin_unlock_irqrestore(&common->cc_lock, flags);
1661
1662 /*
1663 * If the operating channel changes, change the survey in-use flags
1664 * along with it.
1665 * Reset the survey data for the new channel, unless we're switching
1666 * back to the operating channel from an off-channel operation.
1667 */
1668 if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
1669 sc->cur_survey != &sc->survey[pos]) {
1670
1671 if (sc->cur_survey)
1672 sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
1673
1674 sc->cur_survey = &sc->survey[pos];
1675
1676 memset(sc->cur_survey, 0, sizeof(struct survey_info));
1677 sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
1678 } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
1679 memset(&sc->survey[pos], 0, sizeof(struct survey_info));
1680 }
1681
1634 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 1682 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
1635 ath_print(common, ATH_DBG_FATAL, 1683 ath_print(common, ATH_DBG_FATAL,
1636 "Unable to set channel\n"); 1684 "Unable to set channel\n");
1637 mutex_unlock(&sc->mutex); 1685 mutex_unlock(&sc->mutex);
1638 return -EINVAL; 1686 return -EINVAL;
1639 } 1687 }
1688
1689 /*
1690 * The most recent snapshot of channel->noisefloor for the old
1691 * channel is only available after the hardware reset. Copy it to
1692 * the survey stats now.
1693 */
1694 if (old_pos >= 0)
1695 ath_update_survey_nf(sc, old_pos);
1640 } 1696 }
1641 1697
1642skip_chan_change: 1698skip_chan_change:
@@ -1667,6 +1723,7 @@ skip_chan_change:
1667 FIF_PSPOLL | \ 1723 FIF_PSPOLL | \
1668 FIF_OTHER_BSS | \ 1724 FIF_OTHER_BSS | \
1669 FIF_BCN_PRBRESP_PROMISC | \ 1725 FIF_BCN_PRBRESP_PROMISC | \
1726 FIF_PROBE_REQ | \
1670 FIF_FCSFAIL) 1727 FIF_FCSFAIL)
1671 1728
1672/* FIXME: sc->sc_full_reset ? */ 1729/* FIXME: sc->sc_full_reset ? */
@@ -1777,7 +1834,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1777 1834
1778 switch (cmd) { 1835 switch (cmd) {
1779 case SET_KEY: 1836 case SET_KEY:
1780 ret = ath9k_cmn_key_config(common, vif, sta, key); 1837 ret = ath_key_config(common, vif, sta, key);
1781 if (ret >= 0) { 1838 if (ret >= 0) {
1782 key->hw_key_idx = ret; 1839 key->hw_key_idx = ret;
1783 /* push IV and Michael MIC generation to stack */ 1840 /* push IV and Michael MIC generation to stack */
@@ -1791,7 +1848,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1791 } 1848 }
1792 break; 1849 break;
1793 case DISABLE_KEY: 1850 case DISABLE_KEY:
1794 ath9k_cmn_key_delete(common, key); 1851 ath_key_delete(common, key);
1795 break; 1852 break;
1796 default: 1853 default:
1797 ret = -EINVAL; 1854 ret = -EINVAL;
@@ -1975,8 +2032,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1975 break; 2032 break;
1976 case IEEE80211_AMPDU_TX_START: 2033 case IEEE80211_AMPDU_TX_START:
1977 ath9k_ps_wakeup(sc); 2034 ath9k_ps_wakeup(sc);
1978 ath_tx_aggr_start(sc, sta, tid, ssn); 2035 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
1979 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2036 if (!ret)
2037 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1980 ath9k_ps_restore(sc); 2038 ath9k_ps_restore(sc);
1981 break; 2039 break;
1982 case IEEE80211_AMPDU_TX_STOP: 2040 case IEEE80211_AMPDU_TX_STOP:
@@ -2005,16 +2063,35 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2005{ 2063{
2006 struct ath_wiphy *aphy = hw->priv; 2064 struct ath_wiphy *aphy = hw->priv;
2007 struct ath_softc *sc = aphy->sc; 2065 struct ath_softc *sc = aphy->sc;
2008 struct ath_hw *ah = sc->sc_ah; 2066 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2009 struct ath_common *common = ath9k_hw_common(ah); 2067 struct ieee80211_supported_band *sband;
2010 struct ieee80211_conf *conf = &hw->conf; 2068 struct ieee80211_channel *chan;
2069 unsigned long flags;
2070 int pos;
2011 2071
2012 if (idx != 0) 2072 spin_lock_irqsave(&common->cc_lock, flags);
2073 if (idx == 0)
2074 ath_update_survey_stats(sc);
2075
2076 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
2077 if (sband && idx >= sband->n_channels) {
2078 idx -= sband->n_channels;
2079 sband = NULL;
2080 }
2081
2082 if (!sband)
2083 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
2084
2085 if (!sband || idx >= sband->n_channels) {
2086 spin_unlock_irqrestore(&common->cc_lock, flags);
2013 return -ENOENT; 2087 return -ENOENT;
2088 }
2014 2089
2015 survey->channel = conf->channel; 2090 chan = &sband->channels[idx];
2016 survey->filled = SURVEY_INFO_NOISE_DBM; 2091 pos = chan->hw_value;
2017 survey->noise = common->ani.noise_floor; 2092 memcpy(survey, &sc->survey[pos], sizeof(*survey));
2093 survey->channel = chan;
2094 spin_unlock_irqrestore(&common->cc_lock, flags);
2018 2095
2019 return 0; 2096 return 0;
2020} 2097}
@@ -2039,7 +2116,6 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2039 2116
2040 aphy->state = ATH_WIPHY_SCAN; 2117 aphy->state = ATH_WIPHY_SCAN;
2041 ath9k_wiphy_pause_all_forced(sc, aphy); 2118 ath9k_wiphy_pause_all_forced(sc, aphy);
2042 sc->sc_flags |= SC_OP_SCANNING;
2043 mutex_unlock(&sc->mutex); 2119 mutex_unlock(&sc->mutex);
2044} 2120}
2045 2121
@@ -2054,7 +2130,6 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2054 2130
2055 mutex_lock(&sc->mutex); 2131 mutex_lock(&sc->mutex);
2056 aphy->state = ATH_WIPHY_ACTIVE; 2132 aphy->state = ATH_WIPHY_ACTIVE;
2057 sc->sc_flags &= ~SC_OP_SCANNING;
2058 mutex_unlock(&sc->mutex); 2133 mutex_unlock(&sc->mutex);
2059} 2134}
2060 2135
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index e724c2c1ae2a..17969af842f6 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -45,9 +45,6 @@
45 } \ 45 } \
46 } while (0) 46 } while (0)
47 47
48#define ATH9K_IS_MIC_ENABLED(ah) \
49 ((ah)->sta_id1_defaults & AR_STA_ID1_CRPT_MIC_ENABLE)
50
51#define ANTSWAP_AB 0x0001 48#define ANTSWAP_AB 0x0001
52#define REDUCE_CHAIN_0 0x00000050 49#define REDUCE_CHAIN_0 0x00000050
53#define REDUCE_CHAIN_1 0x00000051 50#define REDUCE_CHAIN_1 0x00000051
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e49be733d546..0cee90cf8dc9 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -302,7 +302,7 @@ static const struct ath_rate_table ar5416_11ng_ratetable = {
302 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000, 302 [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
303 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */ 303 205100, 20, 20, 8, 64, 65, 65 }, /* 243 Mb */
304 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000, 304 [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
305 224700, 20, 20, 8, 64, 65, 65 }, /* 170 Mb */ 305 224700, 20, 20, 8, 64, 65, 65 }, /* 270 Mb */
306 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000, 306 [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
307 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */ 307 263100, 21, 21, 8, 66, 67, 67 }, /* 324 Mb */
308 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000, 308 [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
@@ -378,17 +378,6 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
378 0, /* Phy rates allowed initially */ 378 0, /* Phy rates allowed initially */
379}; 379};
380 380
381static const struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX] = {
382 [ATH9K_MODE_11A] = &ar5416_11a_ratetable,
383 [ATH9K_MODE_11G] = &ar5416_11g_ratetable,
384 [ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable,
385 [ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable,
386 [ATH9K_MODE_11NA_HT40PLUS] = &ar5416_11na_ratetable,
387 [ATH9K_MODE_11NA_HT40MINUS] = &ar5416_11na_ratetable,
388 [ATH9K_MODE_11NG_HT40PLUS] = &ar5416_11ng_ratetable,
389 [ATH9K_MODE_11NG_HT40MINUS] = &ar5416_11ng_ratetable,
390};
391
392static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, 381static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
393 struct ieee80211_tx_rate *rate); 382 struct ieee80211_tx_rate *rate);
394 383
@@ -791,7 +780,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
791 */ 780 */
792 try_per_rate = 4; 781 try_per_rate = 4;
793 782
794 rate_table = sc->cur_rate_table; 783 rate_table = ath_rc_priv->rate_table;
795 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe); 784 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
796 785
797 /* 786 /*
@@ -1026,6 +1015,16 @@ static bool ath_rc_update_per(struct ath_softc *sc,
1026 return state_change; 1015 return state_change;
1027} 1016}
1028 1017
1018static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
1019 int xretries, int retries, u8 per)
1020{
1021 struct ath_rc_stats *stats = &rc->rcstats[rix];
1022
1023 stats->xretries += xretries;
1024 stats->retries += retries;
1025 stats->per = per;
1026}
1027
1029/* Update PER, RSSI and whatever else that the code thinks it is doing. 1028/* Update PER, RSSI and whatever else that the code thinks it is doing.
1030 If you can make sense of all this, you really need to go out more. */ 1029 If you can make sense of all this, you really need to go out more. */
1031 1030
@@ -1038,7 +1037,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1038 int rate; 1037 int rate;
1039 u8 last_per; 1038 u8 last_per;
1040 bool state_change = false; 1039 bool state_change = false;
1041 const struct ath_rate_table *rate_table = sc->cur_rate_table; 1040 const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
1042 int size = ath_rc_priv->rate_table_size; 1041 int size = ath_rc_priv->rate_table_size;
1043 1042
1044 if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt)) 1043 if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
@@ -1098,7 +1097,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1098 ath_rc_priv->per_down_time = now_msec; 1097 ath_rc_priv->per_down_time = now_msec;
1099 } 1098 }
1100 1099
1101 ath_debug_stat_retries(sc, tx_rate, xretries, retries, 1100 ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
1102 ath_rc_priv->per[tx_rate]); 1101 ath_rc_priv->per[tx_rate]);
1103 1102
1104} 1103}
@@ -1140,7 +1139,7 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1140 u8 flags; 1139 u8 flags;
1141 u32 i = 0, rix; 1140 u32 i = 0, rix;
1142 1141
1143 rate_table = sc->cur_rate_table; 1142 rate_table = ath_rc_priv->rate_table;
1144 1143
1145 /* 1144 /*
1146 * If the first rate is not the final index, there 1145 * If the first rate is not the final index, there
@@ -1190,39 +1189,23 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1190static const 1189static const
1191struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc, 1190struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1192 enum ieee80211_band band, 1191 enum ieee80211_band band,
1193 bool is_ht, 1192 bool is_ht)
1194 bool is_cw_40)
1195{ 1193{
1196 int mode = 0;
1197 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1194 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1198 1195
1199 switch(band) { 1196 switch(band) {
1200 case IEEE80211_BAND_2GHZ: 1197 case IEEE80211_BAND_2GHZ:
1201 mode = ATH9K_MODE_11G;
1202 if (is_ht) 1198 if (is_ht)
1203 mode = ATH9K_MODE_11NG_HT20; 1199 return &ar5416_11ng_ratetable;
1204 if (is_cw_40) 1200 return &ar5416_11g_ratetable;
1205 mode = ATH9K_MODE_11NG_HT40PLUS;
1206 break;
1207 case IEEE80211_BAND_5GHZ: 1201 case IEEE80211_BAND_5GHZ:
1208 mode = ATH9K_MODE_11A;
1209 if (is_ht) 1202 if (is_ht)
1210 mode = ATH9K_MODE_11NA_HT20; 1203 return &ar5416_11na_ratetable;
1211 if (is_cw_40) 1204 return &ar5416_11a_ratetable;
1212 mode = ATH9K_MODE_11NA_HT40PLUS;
1213 break;
1214 default: 1205 default:
1215 ath_print(common, ATH_DBG_CONFIG, "Invalid band\n"); 1206 ath_print(common, ATH_DBG_CONFIG, "Invalid band\n");
1216 return NULL; 1207 return NULL;
1217 } 1208 }
1218
1219 BUG_ON(mode >= ATH9K_MODE_MAX);
1220
1221 ath_print(common, ATH_DBG_CONFIG,
1222 "Choosing rate table for mode: %d\n", mode);
1223
1224 sc->cur_rate_mode = mode;
1225 return hw_rate_table[mode];
1226} 1209}
1227 1210
1228static void ath_rc_init(struct ath_softc *sc, 1211static void ath_rc_init(struct ath_softc *sc,
@@ -1293,7 +1276,7 @@ static void ath_rc_init(struct ath_softc *sc,
1293 ath_rc_priv->max_valid_rate = k; 1276 ath_rc_priv->max_valid_rate = k;
1294 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1277 ath_rc_sort_validrates(rate_table, ath_rc_priv);
1295 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; 1278 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
1296 sc->cur_rate_table = rate_table; 1279 ath_rc_priv->rate_table = rate_table;
1297 1280
1298 ath_print(common, ATH_DBG_CONFIG, 1281 ath_print(common, ATH_DBG_CONFIG,
1299 "RC Initialized with capabilities: 0x%x\n", 1282 "RC Initialized with capabilities: 0x%x\n",
@@ -1320,10 +1303,35 @@ static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
1320 return caps; 1303 return caps;
1321} 1304}
1322 1305
1306static bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an,
1307 u8 tidno)
1308{
1309 struct ath_atx_tid *txtid;
1310
1311 if (!(sc->sc_flags & SC_OP_TXAGGR))
1312 return false;
1313
1314 txtid = ATH_AN_2_TID(an, tidno);
1315
1316 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
1317 return true;
1318 return false;
1319}
1320
1321
1323/***********************************/ 1322/***********************************/
1324/* mac80211 Rate Control callbacks */ 1323/* mac80211 Rate Control callbacks */
1325/***********************************/ 1324/***********************************/
1326 1325
1326static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1327{
1328 struct ath_rc_stats *stats;
1329
1330 stats = &rc->rcstats[final_rate];
1331 stats->success++;
1332}
1333
1334
1327static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, 1335static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1328 struct ieee80211_sta *sta, void *priv_sta, 1336 struct ieee80211_sta *sta, void *priv_sta,
1329 struct sk_buff *skb) 1337 struct sk_buff *skb)
@@ -1359,6 +1367,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1359 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) 1367 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
1360 return; 1368 return;
1361 1369
1370 if (!(tx_info->flags & IEEE80211_TX_STAT_AMPDU)) {
1371 tx_info->status.ampdu_ack_len =
1372 (tx_info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
1373 tx_info->status.ampdu_len = 1;
1374 }
1375
1362 /* 1376 /*
1363 * If an underrun error is seen assume it as an excessive retry only 1377 * If an underrun error is seen assume it as an excessive retry only
1364 * if max frame trigger level has been reached (2 KB for singel stream, 1378 * if max frame trigger level has been reached (2 KB for singel stream,
@@ -1397,8 +1411,9 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1397 } 1411 }
1398 } 1412 }
1399 1413
1400 ath_debug_stat_rc(sc, ath_rc_get_rateindex(sc->cur_rate_table, 1414 ath_debug_stat_rc(ath_rc_priv,
1401 &tx_info->status.rates[final_ts_idx])); 1415 ath_rc_get_rateindex(ath_rc_priv->rate_table,
1416 &tx_info->status.rates[final_ts_idx]));
1402} 1417}
1403 1418
1404static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, 1419static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
@@ -1438,14 +1453,8 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1438 1453
1439 /* Choose rate table first */ 1454 /* Choose rate table first */
1440 1455
1441 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) || 1456 rate_table = ath_choose_rate_table(sc, sband->band,
1442 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) || 1457 sta->ht_cap.ht_supported);
1443 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)) {
1444 rate_table = ath_choose_rate_table(sc, sband->band,
1445 sta->ht_cap.ht_supported, is_cw40);
1446 } else {
1447 rate_table = hw_rate_table[sc->cur_rate_mode];
1448 }
1449 1458
1450 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi); 1459 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, is_cw40, is_sgi);
1451 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1460 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
@@ -1485,8 +1494,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1485 1494
1486 if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) { 1495 if ((local_cw40 != oper_cw40) || (local_sgi != oper_sgi)) {
1487 rate_table = ath_choose_rate_table(sc, sband->band, 1496 rate_table = ath_choose_rate_table(sc, sband->band,
1488 sta->ht_cap.ht_supported, 1497 sta->ht_cap.ht_supported);
1489 oper_cw40);
1490 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta, 1498 ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta,
1491 oper_cw40, oper_sgi); 1499 oper_cw40, oper_sgi);
1492 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1500 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
@@ -1494,11 +1502,98 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1494 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, 1502 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
1495 "Operating HT Bandwidth changed to: %d\n", 1503 "Operating HT Bandwidth changed to: %d\n",
1496 sc->hw->conf.channel_type); 1504 sc->hw->conf.channel_type);
1497 sc->cur_rate_table = hw_rate_table[sc->cur_rate_mode];
1498 } 1505 }
1499 } 1506 }
1500} 1507}
1501 1508
1509#ifdef CONFIG_ATH9K_DEBUGFS
1510
1511static int ath9k_debugfs_open(struct inode *inode, struct file *file)
1512{
1513 file->private_data = inode->i_private;
1514 return 0;
1515}
1516
1517static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1518 size_t count, loff_t *ppos)
1519{
1520 struct ath_rate_priv *rc = file->private_data;
1521 char *buf;
1522 unsigned int len = 0, max;
1523 int i = 0;
1524 ssize_t retval;
1525
1526 if (rc->rate_table == NULL)
1527 return 0;
1528
1529 max = 80 + rc->rate_table->rate_cnt * 1024 + 1;
1530 buf = kmalloc(max, GFP_KERNEL);
1531 if (buf == NULL)
1532 return -ENOMEM;
1533
1534 len += sprintf(buf, "%6s %6s %6s "
1535 "%10s %10s %10s %10s\n",
1536 "HT", "MCS", "Rate",
1537 "Success", "Retries", "XRetries", "PER");
1538
1539 for (i = 0; i < rc->rate_table->rate_cnt; i++) {
1540 u32 ratekbps = rc->rate_table->info[i].ratekbps;
1541 struct ath_rc_stats *stats = &rc->rcstats[i];
1542 char mcs[5];
1543 char htmode[5];
1544 int used_mcs = 0, used_htmode = 0;
1545
1546 if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
1547 used_mcs = snprintf(mcs, 5, "%d",
1548 rc->rate_table->info[i].ratecode);
1549
1550 if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
1551 used_htmode = snprintf(htmode, 5, "HT40");
1552 else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
1553 used_htmode = snprintf(htmode, 5, "HT20");
1554 else
1555 used_htmode = snprintf(htmode, 5, "????");
1556 }
1557
1558 mcs[used_mcs] = '\0';
1559 htmode[used_htmode] = '\0';
1560
1561 len += snprintf(buf + len, max - len,
1562 "%6s %6s %3u.%d: "
1563 "%10u %10u %10u %10u\n",
1564 htmode,
1565 mcs,
1566 ratekbps / 1000,
1567 (ratekbps % 1000) / 100,
1568 stats->success,
1569 stats->retries,
1570 stats->xretries,
1571 stats->per);
1572 }
1573
1574 if (len > max)
1575 len = max;
1576
1577 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1578 kfree(buf);
1579 return retval;
1580}
1581
1582static const struct file_operations fops_rcstat = {
1583 .read = read_file_rcstat,
1584 .open = ath9k_debugfs_open,
1585 .owner = THIS_MODULE
1586};
1587
1588static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
1589 struct dentry *dir)
1590{
1591 struct ath_rate_priv *rc = priv_sta;
1592 debugfs_create_file("rc_stats", S_IRUGO, dir, rc, &fops_rcstat);
1593}
1594
1595#endif /* CONFIG_ATH9K_DEBUGFS */
1596
1502static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 1597static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
1503{ 1598{
1504 struct ath_wiphy *aphy = hw->priv; 1599 struct ath_wiphy *aphy = hw->priv;
@@ -1545,6 +1640,9 @@ static struct rate_control_ops ath_rate_ops = {
1545 .free = ath_rate_free, 1640 .free = ath_rate_free,
1546 .alloc_sta = ath_rate_alloc_sta, 1641 .alloc_sta = ath_rate_alloc_sta,
1547 .free_sta = ath_rate_free_sta, 1642 .free_sta = ath_rate_free_sta,
1643#ifdef CONFIG_ATH9K_DEBUGFS
1644 .add_sta_debugfs = ath_rate_add_sta_debugfs,
1645#endif
1548}; 1646};
1549 1647
1550int ath_rate_control_register(void) 1648int ath_rate_control_register(void)
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index dc1082654501..2f46a2266ba1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -135,20 +135,21 @@ enum {
135 135
136/** 136/**
137 * struct ath_rate_table - Rate Control table 137 * struct ath_rate_table - Rate Control table
138 * @valid: valid for use in rate control 138 * @rate_cnt: total number of rates for the given wireless mode
139 * @valid_single_stream: valid for use in rate control for 139 * @mcs_start: MCS rate index offset
140 * single stream operation 140 * @rate_flags: Rate Control flags
141 * @phy: CCK/OFDM 141 * @phy: CCK/OFDM/HT20/HT40
142 * @ratekbps: rate in Kbits per second 142 * @ratekbps: rate in Kbits per second
143 * @user_ratekbps: user rate in Kbits per second 143 * @user_ratekbps: user rate in Kbits per second
144 * @ratecode: rate that goes into HW descriptors 144 * @ratecode: rate that goes into HW descriptors
145 * @short_preamble: Mask for enabling short preamble in ratecode for CCK
146 * @dot11rate: value that goes into supported 145 * @dot11rate: value that goes into supported
147 * rates info element of MLME 146 * rates info element of MLME
148 * @ctrl_rate: Index of next lower basic rate, used for duration computation 147 * @ctrl_rate: Index of next lower basic rate, used for duration computation
149 * @max_4ms_framelen: maximum frame length(bytes) for tx duration 148 * @cw40index: Index of rates having 40MHz channel width
149 * @sgi_index: Index of rates having Short Guard Interval
150 * @ht_index: high throughput rates having 40MHz channel width and
151 * Short Guard Interval
150 * @probe_interval: interval for rate control to probe for other rates 152 * @probe_interval: interval for rate control to probe for other rates
151 * @rssi_reduce_interval: interval for rate control to reduce rssi
152 * @initial_ratemax: initial ratemax value 153 * @initial_ratemax: initial ratemax value
153 */ 154 */
154struct ath_rate_table { 155struct ath_rate_table {
@@ -175,6 +176,13 @@ struct ath_rateset {
175 u8 rs_rates[ATH_RATE_MAX]; 176 u8 rs_rates[ATH_RATE_MAX];
176}; 177};
177 178
179struct ath_rc_stats {
180 u32 success;
181 u32 retries;
182 u32 xretries;
183 u8 per;
184};
185
178/** 186/**
179 * struct ath_rate_priv - Rate Control priv data 187 * struct ath_rate_priv - Rate Control priv data
180 * @state: RC state 188 * @state: RC state
@@ -211,6 +219,10 @@ struct ath_rate_priv {
211 struct ath_rateset neg_rates; 219 struct ath_rateset neg_rates;
212 struct ath_rateset neg_ht_rates; 220 struct ath_rateset neg_ht_rates;
213 struct ath_rate_softc *asc; 221 struct ath_rate_softc *asc;
222 const struct ath_rate_table *rate_table;
223
224 struct dentry *debugfs_rcstats;
225 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
214}; 226};
215 227
216#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0) 228#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
@@ -224,7 +236,18 @@ enum ath9k_internal_frame_type {
224 ATH9K_IFT_UNPAUSE 236 ATH9K_IFT_UNPAUSE
225}; 237};
226 238
239#ifdef CONFIG_ATH9K_RATE_CONTROL
227int ath_rate_control_register(void); 240int ath_rate_control_register(void);
228void ath_rate_control_unregister(void); 241void ath_rate_control_unregister(void);
242#else
243static inline int ath_rate_control_register(void)
244{
245 return 0;
246}
247
248static inline void ath_rate_control_unregister(void)
249{
250}
251#endif
229 252
230#endif /* RC_H */ 253#endif /* RC_H */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 534a91bcc1d9..fe73fc50082a 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -19,6 +19,15 @@
19 19
20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb)) 20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
21 21
22static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
23 int mindelta, int main_rssi_avg,
24 int alt_rssi_avg, int pkt_count)
25{
26 return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
27 (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
28 (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
29}
30
22static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) 31static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
23{ 32{
24 return sc->ps_enabled && 33 return sc->ps_enabled &&
@@ -110,8 +119,7 @@ static void ath_opmode_init(struct ath_softc *sc)
110 ath9k_hw_setrxfilter(ah, rfilt); 119 ath9k_hw_setrxfilter(ah, rfilt);
111 120
112 /* configure bssid mask */ 121 /* configure bssid mask */
113 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 122 ath_hw_setbssidmask(common);
114 ath_hw_setbssidmask(common);
115 123
116 /* configure operational mode */ 124 /* configure operational mode */
117 ath9k_hw_setopmode(ah); 125 ath9k_hw_setopmode(ah);
@@ -260,6 +268,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
260 bf->bf_buf_addr))) { 268 bf->bf_buf_addr))) {
261 dev_kfree_skb_any(skb); 269 dev_kfree_skb_any(skb);
262 bf->bf_mpdu = NULL; 270 bf->bf_mpdu = NULL;
271 bf->bf_buf_addr = 0;
263 ath_print(common, ATH_DBG_FATAL, 272 ath_print(common, ATH_DBG_FATAL,
264 "dma_mapping_error() on RX init\n"); 273 "dma_mapping_error() on RX init\n");
265 error = -ENOMEM; 274 error = -ENOMEM;
@@ -292,7 +301,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
292 301
293 ath_opmode_init(sc); 302 ath_opmode_init(sc);
294 303
295 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING)); 304 ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
296} 305}
297 306
298static void ath_edma_stop_recv(struct ath_softc *sc) 307static void ath_edma_stop_recv(struct ath_softc *sc)
@@ -350,12 +359,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
350 bf->bf_buf_addr))) { 359 bf->bf_buf_addr))) {
351 dev_kfree_skb_any(skb); 360 dev_kfree_skb_any(skb);
352 bf->bf_mpdu = NULL; 361 bf->bf_mpdu = NULL;
362 bf->bf_buf_addr = 0;
353 ath_print(common, ATH_DBG_FATAL, 363 ath_print(common, ATH_DBG_FATAL,
354 "dma_mapping_error() on RX init\n"); 364 "dma_mapping_error() on RX init\n");
355 error = -ENOMEM; 365 error = -ENOMEM;
356 goto err; 366 goto err;
357 } 367 }
358 bf->bf_dmacontext = bf->bf_buf_addr;
359 } 368 }
360 sc->rx.rxlink = NULL; 369 sc->rx.rxlink = NULL;
361 } 370 }
@@ -385,6 +394,8 @@ void ath_rx_cleanup(struct ath_softc *sc)
385 common->rx_bufsize, 394 common->rx_bufsize,
386 DMA_FROM_DEVICE); 395 DMA_FROM_DEVICE);
387 dev_kfree_skb(skb); 396 dev_kfree_skb(skb);
397 bf->bf_buf_addr = 0;
398 bf->bf_mpdu = NULL;
388 } 399 }
389 } 400 }
390 401
@@ -422,8 +433,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
422 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST 433 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
423 | ATH9K_RX_FILTER_MCAST; 434 | ATH9K_RX_FILTER_MCAST;
424 435
425 /* If not a STA, enable processing of Probe Requests */ 436 if (sc->rx.rxfilter & FIF_PROBE_REQ)
426 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
427 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 437 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
428 438
429 /* 439 /*
@@ -440,13 +450,14 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
440 rfilt |= ATH9K_RX_FILTER_CONTROL; 450 rfilt |= ATH9K_RX_FILTER_CONTROL;
441 451
442 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) && 452 if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
453 (sc->nvifs <= 1) &&
443 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)) 454 !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
444 rfilt |= ATH9K_RX_FILTER_MYBEACON; 455 rfilt |= ATH9K_RX_FILTER_MYBEACON;
445 else 456 else
446 rfilt |= ATH9K_RX_FILTER_BEACON; 457 rfilt |= ATH9K_RX_FILTER_BEACON;
447 458
448 if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) || 459 if ((AR_SREV_9280_20_OR_LATER(sc->sc_ah) ||
449 AR_SREV_9285_10_OR_LATER(sc->sc_ah)) && 460 AR_SREV_9285_12_OR_LATER(sc->sc_ah)) &&
450 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) && 461 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
451 (sc->rx.rxfilter & FIF_PSPOLL)) 462 (sc->rx.rxfilter & FIF_PSPOLL))
452 rfilt |= ATH9K_RX_FILTER_PSPOLL; 463 rfilt |= ATH9K_RX_FILTER_PSPOLL;
@@ -454,9 +465,8 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
454 if (conf_is_ht(&sc->hw->conf)) 465 if (conf_is_ht(&sc->hw->conf))
455 rfilt |= ATH9K_RX_FILTER_COMP_BAR; 466 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
456 467
457 if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) { 468 if (sc->sec_wiphy || (sc->nvifs > 1) ||
458 /* TODO: only needed if more than one BSSID is in use in 469 (sc->rx.rxfilter & FIF_OTHER_BSS)) {
459 * station/adhoc mode */
460 /* The following may also be needed for other older chips */ 470 /* The following may also be needed for other older chips */
461 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) 471 if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
462 rfilt |= ATH9K_RX_FILTER_PROM; 472 rfilt |= ATH9K_RX_FILTER_PROM;
@@ -498,7 +508,7 @@ int ath_startrecv(struct ath_softc *sc)
498start_recv: 508start_recv:
499 spin_unlock_bh(&sc->rx.rxbuflock); 509 spin_unlock_bh(&sc->rx.rxbuflock);
500 ath_opmode_init(sc); 510 ath_opmode_init(sc);
501 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING)); 511 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
502 512
503 return 0; 513 return 0;
504} 514}
@@ -631,7 +641,7 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
631 * No more broadcast/multicast frames to be received at this 641 * No more broadcast/multicast frames to be received at this
632 * point. 642 * point.
633 */ 643 */
634 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 644 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
635 ath_print(common, ATH_DBG_PS, 645 ath_print(common, ATH_DBG_PS,
636 "All PS CAB frames received, back to sleep\n"); 646 "All PS CAB frames received, back to sleep\n");
637 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 647 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
@@ -969,7 +979,11 @@ static void ath9k_process_rssi(struct ath_common *common,
969 * at least one sdata of a wiphy on mac80211 but with ath9k virtual 979 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
970 * wiphy you'd have to iterate over every wiphy and each sdata. 980 * wiphy you'd have to iterate over every wiphy and each sdata.
971 */ 981 */
972 sta = ieee80211_find_sta_by_hw(hw, hdr->addr2); 982 if (is_multicast_ether_addr(hdr->addr1))
983 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
984 else
985 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);
986
973 if (sta) { 987 if (sta) {
974 an = (struct ath_node *) sta->drv_priv; 988 an = (struct ath_node *) sta->drv_priv;
975 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && 989 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
@@ -1076,6 +1090,539 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
1076 rxs->flag &= ~RX_FLAG_DECRYPTED; 1090 rxs->flag &= ~RX_FLAG_DECRYPTED;
1077} 1091}
1078 1092
1093static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
1094 struct ath_hw_antcomb_conf ant_conf,
1095 int main_rssi_avg)
1096{
1097 antcomb->quick_scan_cnt = 0;
1098
1099 if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
1100 antcomb->rssi_lna2 = main_rssi_avg;
1101 else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
1102 antcomb->rssi_lna1 = main_rssi_avg;
1103
1104 switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
1105 case (0x10): /* LNA2 A-B */
1106 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1107 antcomb->first_quick_scan_conf =
1108 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1109 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1110 break;
1111 case (0x20): /* LNA1 A-B */
1112 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1113 antcomb->first_quick_scan_conf =
1114 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1115 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1116 break;
1117 case (0x21): /* LNA1 LNA2 */
1118 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
1119 antcomb->first_quick_scan_conf =
1120 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1121 antcomb->second_quick_scan_conf =
1122 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1123 break;
1124 case (0x12): /* LNA2 LNA1 */
1125 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
1126 antcomb->first_quick_scan_conf =
1127 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1128 antcomb->second_quick_scan_conf =
1129 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1130 break;
1131 case (0x13): /* LNA2 A+B */
1132 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1133 antcomb->first_quick_scan_conf =
1134 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1135 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
1136 break;
1137 case (0x23): /* LNA1 A+B */
1138 antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1139 antcomb->first_quick_scan_conf =
1140 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1141 antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
1142 break;
1143 default:
1144 break;
1145 }
1146}
1147
1148static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
1149 struct ath_hw_antcomb_conf *div_ant_conf,
1150 int main_rssi_avg, int alt_rssi_avg,
1151 int alt_ratio)
1152{
1153 /* alt_good */
1154 switch (antcomb->quick_scan_cnt) {
1155 case 0:
1156 /* set alt to main, and alt to first conf */
1157 div_ant_conf->main_lna_conf = antcomb->main_conf;
1158 div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
1159 break;
1160 case 1:
1161 /* set alt to main, and alt to first conf */
1162 div_ant_conf->main_lna_conf = antcomb->main_conf;
1163 div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
1164 antcomb->rssi_first = main_rssi_avg;
1165 antcomb->rssi_second = alt_rssi_avg;
1166
1167 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1168 /* main is LNA1 */
1169 if (ath_is_alt_ant_ratio_better(alt_ratio,
1170 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1171 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1172 main_rssi_avg, alt_rssi_avg,
1173 antcomb->total_pkt_count))
1174 antcomb->first_ratio = true;
1175 else
1176 antcomb->first_ratio = false;
1177 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1178 if (ath_is_alt_ant_ratio_better(alt_ratio,
1179 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1180 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1181 main_rssi_avg, alt_rssi_avg,
1182 antcomb->total_pkt_count))
1183 antcomb->first_ratio = true;
1184 else
1185 antcomb->first_ratio = false;
1186 } else {
1187 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1188 (alt_rssi_avg > main_rssi_avg +
1189 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1190 (alt_rssi_avg > main_rssi_avg)) &&
1191 (antcomb->total_pkt_count > 50))
1192 antcomb->first_ratio = true;
1193 else
1194 antcomb->first_ratio = false;
1195 }
1196 break;
1197 case 2:
1198 antcomb->alt_good = false;
1199 antcomb->scan_not_start = false;
1200 antcomb->scan = false;
1201 antcomb->rssi_first = main_rssi_avg;
1202 antcomb->rssi_third = alt_rssi_avg;
1203
1204 if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
1205 antcomb->rssi_lna1 = alt_rssi_avg;
1206 else if (antcomb->second_quick_scan_conf ==
1207 ATH_ANT_DIV_COMB_LNA2)
1208 antcomb->rssi_lna2 = alt_rssi_avg;
1209 else if (antcomb->second_quick_scan_conf ==
1210 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
1211 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
1212 antcomb->rssi_lna2 = main_rssi_avg;
1213 else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
1214 antcomb->rssi_lna1 = main_rssi_avg;
1215 }
1216
1217 if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
1218 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
1219 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1220 else
1221 div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
1222
1223 if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
1224 if (ath_is_alt_ant_ratio_better(alt_ratio,
1225 ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
1226 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1227 main_rssi_avg, alt_rssi_avg,
1228 antcomb->total_pkt_count))
1229 antcomb->second_ratio = true;
1230 else
1231 antcomb->second_ratio = false;
1232 } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
1233 if (ath_is_alt_ant_ratio_better(alt_ratio,
1234 ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
1235 ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
1236 main_rssi_avg, alt_rssi_avg,
1237 antcomb->total_pkt_count))
1238 antcomb->second_ratio = true;
1239 else
1240 antcomb->second_ratio = false;
1241 } else {
1242 if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
1243 (alt_rssi_avg > main_rssi_avg +
1244 ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
1245 (alt_rssi_avg > main_rssi_avg)) &&
1246 (antcomb->total_pkt_count > 50))
1247 antcomb->second_ratio = true;
1248 else
1249 antcomb->second_ratio = false;
1250 }
1251
1252 /* set alt to the conf with maximun ratio */
1253 if (antcomb->first_ratio && antcomb->second_ratio) {
1254 if (antcomb->rssi_second > antcomb->rssi_third) {
1255 /* first alt*/
1256 if ((antcomb->first_quick_scan_conf ==
1257 ATH_ANT_DIV_COMB_LNA1) ||
1258 (antcomb->first_quick_scan_conf ==
1259 ATH_ANT_DIV_COMB_LNA2))
1260 /* Set alt LNA1 or LNA2*/
1261 if (div_ant_conf->main_lna_conf ==
1262 ATH_ANT_DIV_COMB_LNA2)
1263 div_ant_conf->alt_lna_conf =
1264 ATH_ANT_DIV_COMB_LNA1;
1265 else
1266 div_ant_conf->alt_lna_conf =
1267 ATH_ANT_DIV_COMB_LNA2;
1268 else
1269 /* Set alt to A+B or A-B */
1270 div_ant_conf->alt_lna_conf =
1271 antcomb->first_quick_scan_conf;
1272 } else if ((antcomb->second_quick_scan_conf ==
1273 ATH_ANT_DIV_COMB_LNA1) ||
1274 (antcomb->second_quick_scan_conf ==
1275 ATH_ANT_DIV_COMB_LNA2)) {
1276 /* Set alt LNA1 or LNA2 */
1277 if (div_ant_conf->main_lna_conf ==
1278 ATH_ANT_DIV_COMB_LNA2)
1279 div_ant_conf->alt_lna_conf =
1280 ATH_ANT_DIV_COMB_LNA1;
1281 else
1282 div_ant_conf->alt_lna_conf =
1283 ATH_ANT_DIV_COMB_LNA2;
1284 } else {
1285 /* Set alt to A+B or A-B */
1286 div_ant_conf->alt_lna_conf =
1287 antcomb->second_quick_scan_conf;
1288 }
1289 } else if (antcomb->first_ratio) {
1290 /* first alt */
1291 if ((antcomb->first_quick_scan_conf ==
1292 ATH_ANT_DIV_COMB_LNA1) ||
1293 (antcomb->first_quick_scan_conf ==
1294 ATH_ANT_DIV_COMB_LNA2))
1295 /* Set alt LNA1 or LNA2 */
1296 if (div_ant_conf->main_lna_conf ==
1297 ATH_ANT_DIV_COMB_LNA2)
1298 div_ant_conf->alt_lna_conf =
1299 ATH_ANT_DIV_COMB_LNA1;
1300 else
1301 div_ant_conf->alt_lna_conf =
1302 ATH_ANT_DIV_COMB_LNA2;
1303 else
1304 /* Set alt to A+B or A-B */
1305 div_ant_conf->alt_lna_conf =
1306 antcomb->first_quick_scan_conf;
1307 } else if (antcomb->second_ratio) {
1308 /* second alt */
1309 if ((antcomb->second_quick_scan_conf ==
1310 ATH_ANT_DIV_COMB_LNA1) ||
1311 (antcomb->second_quick_scan_conf ==
1312 ATH_ANT_DIV_COMB_LNA2))
1313 /* Set alt LNA1 or LNA2 */
1314 if (div_ant_conf->main_lna_conf ==
1315 ATH_ANT_DIV_COMB_LNA2)
1316 div_ant_conf->alt_lna_conf =
1317 ATH_ANT_DIV_COMB_LNA1;
1318 else
1319 div_ant_conf->alt_lna_conf =
1320 ATH_ANT_DIV_COMB_LNA2;
1321 else
1322 /* Set alt to A+B or A-B */
1323 div_ant_conf->alt_lna_conf =
1324 antcomb->second_quick_scan_conf;
1325 } else {
1326 /* main is largest */
1327 if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
1328 (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
1329 /* Set alt LNA1 or LNA2 */
1330 if (div_ant_conf->main_lna_conf ==
1331 ATH_ANT_DIV_COMB_LNA2)
1332 div_ant_conf->alt_lna_conf =
1333 ATH_ANT_DIV_COMB_LNA1;
1334 else
1335 div_ant_conf->alt_lna_conf =
1336 ATH_ANT_DIV_COMB_LNA2;
1337 else
1338 /* Set alt to A+B or A-B */
1339 div_ant_conf->alt_lna_conf = antcomb->main_conf;
1340 }
1341 break;
1342 default:
1343 break;
1344 }
1345}
1346
1347static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf)
1348{
1349 /* Adjust the fast_div_bias based on main and alt lna conf */
1350 switch ((ant_conf->main_lna_conf << 4) | ant_conf->alt_lna_conf) {
1351 case (0x01): /* A-B LNA2 */
1352 ant_conf->fast_div_bias = 0x3b;
1353 break;
1354 case (0x02): /* A-B LNA1 */
1355 ant_conf->fast_div_bias = 0x3d;
1356 break;
1357 case (0x03): /* A-B A+B */
1358 ant_conf->fast_div_bias = 0x1;
1359 break;
1360 case (0x10): /* LNA2 A-B */
1361 ant_conf->fast_div_bias = 0x7;
1362 break;
1363 case (0x12): /* LNA2 LNA1 */
1364 ant_conf->fast_div_bias = 0x2;
1365 break;
1366 case (0x13): /* LNA2 A+B */
1367 ant_conf->fast_div_bias = 0x7;
1368 break;
1369 case (0x20): /* LNA1 A-B */
1370 ant_conf->fast_div_bias = 0x6;
1371 break;
1372 case (0x21): /* LNA1 LNA2 */
1373 ant_conf->fast_div_bias = 0x0;
1374 break;
1375 case (0x23): /* LNA1 A+B */
1376 ant_conf->fast_div_bias = 0x6;
1377 break;
1378 case (0x30): /* A+B A-B */
1379 ant_conf->fast_div_bias = 0x1;
1380 break;
1381 case (0x31): /* A+B LNA2 */
1382 ant_conf->fast_div_bias = 0x3b;
1383 break;
1384 case (0x32): /* A+B LNA1 */
1385 ant_conf->fast_div_bias = 0x3d;
1386 break;
1387 default:
1388 break;
1389 }
1390}
1391
1392/* Antenna diversity and combining */
1393static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
1394{
1395 struct ath_hw_antcomb_conf div_ant_conf;
1396 struct ath_ant_comb *antcomb = &sc->ant_comb;
1397 int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
1398 int curr_main_set, curr_bias;
1399 int main_rssi = rs->rs_rssi_ctl0;
1400 int alt_rssi = rs->rs_rssi_ctl1;
1401 int rx_ant_conf, main_ant_conf;
1402 bool short_scan = false;
1403
1404 rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
1405 ATH_ANT_RX_MASK;
1406 main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
1407 ATH_ANT_RX_MASK;
1408
1409 /* Record packet only when alt_rssi is positive */
1410 if (alt_rssi > 0) {
1411 antcomb->total_pkt_count++;
1412 antcomb->main_total_rssi += main_rssi;
1413 antcomb->alt_total_rssi += alt_rssi;
1414 if (main_ant_conf == rx_ant_conf)
1415 antcomb->main_recv_cnt++;
1416 else
1417 antcomb->alt_recv_cnt++;
1418 }
1419
1420 /* Short scan check */
1421 if (antcomb->scan && antcomb->alt_good) {
1422 if (time_after(jiffies, antcomb->scan_start_time +
1423 msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
1424 short_scan = true;
1425 else
1426 if (antcomb->total_pkt_count ==
1427 ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
1428 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1429 antcomb->total_pkt_count);
1430 if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
1431 short_scan = true;
1432 }
1433 }
1434
1435 if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
1436 rs->rs_moreaggr) && !short_scan)
1437 return;
1438
1439 if (antcomb->total_pkt_count) {
1440 alt_ratio = ((antcomb->alt_recv_cnt * 100) /
1441 antcomb->total_pkt_count);
1442 main_rssi_avg = (antcomb->main_total_rssi /
1443 antcomb->total_pkt_count);
1444 alt_rssi_avg = (antcomb->alt_total_rssi /
1445 antcomb->total_pkt_count);
1446 }
1447
1448
1449 ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
1450 curr_alt_set = div_ant_conf.alt_lna_conf;
1451 curr_main_set = div_ant_conf.main_lna_conf;
1452 curr_bias = div_ant_conf.fast_div_bias;
1453
1454 antcomb->count++;
1455
1456 if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
1457 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1458 ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
1459 main_rssi_avg);
1460 antcomb->alt_good = true;
1461 } else {
1462 antcomb->alt_good = false;
1463 }
1464
1465 antcomb->count = 0;
1466 antcomb->scan = true;
1467 antcomb->scan_not_start = true;
1468 }
1469
1470 if (!antcomb->scan) {
1471 if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
1472 if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
1473 /* Switch main and alt LNA */
1474 div_ant_conf.main_lna_conf =
1475 ATH_ANT_DIV_COMB_LNA2;
1476 div_ant_conf.alt_lna_conf =
1477 ATH_ANT_DIV_COMB_LNA1;
1478 } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
1479 div_ant_conf.main_lna_conf =
1480 ATH_ANT_DIV_COMB_LNA1;
1481 div_ant_conf.alt_lna_conf =
1482 ATH_ANT_DIV_COMB_LNA2;
1483 }
1484
1485 goto div_comb_done;
1486 } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
1487 (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
1488 /* Set alt to another LNA */
1489 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
1490 div_ant_conf.alt_lna_conf =
1491 ATH_ANT_DIV_COMB_LNA1;
1492 else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
1493 div_ant_conf.alt_lna_conf =
1494 ATH_ANT_DIV_COMB_LNA2;
1495
1496 goto div_comb_done;
1497 }
1498
1499 if ((alt_rssi_avg < (main_rssi_avg +
1500 ATH_ANT_DIV_COMB_LNA1_LNA2_DELTA)))
1501 goto div_comb_done;
1502 }
1503
1504 if (!antcomb->scan_not_start) {
1505 switch (curr_alt_set) {
1506 case ATH_ANT_DIV_COMB_LNA2:
1507 antcomb->rssi_lna2 = alt_rssi_avg;
1508 antcomb->rssi_lna1 = main_rssi_avg;
1509 antcomb->scan = true;
1510 /* set to A+B */
1511 div_ant_conf.main_lna_conf =
1512 ATH_ANT_DIV_COMB_LNA1;
1513 div_ant_conf.alt_lna_conf =
1514 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1515 break;
1516 case ATH_ANT_DIV_COMB_LNA1:
1517 antcomb->rssi_lna1 = alt_rssi_avg;
1518 antcomb->rssi_lna2 = main_rssi_avg;
1519 antcomb->scan = true;
1520 /* set to A+B */
1521 div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
1522 div_ant_conf.alt_lna_conf =
1523 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1524 break;
1525 case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
1526 antcomb->rssi_add = alt_rssi_avg;
1527 antcomb->scan = true;
1528 /* set to A-B */
1529 div_ant_conf.alt_lna_conf =
1530 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1531 break;
1532 case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
1533 antcomb->rssi_sub = alt_rssi_avg;
1534 antcomb->scan = false;
1535 if (antcomb->rssi_lna2 >
1536 (antcomb->rssi_lna1 +
1537 ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
1538 /* use LNA2 as main LNA */
1539 if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
1540 (antcomb->rssi_add > antcomb->rssi_sub)) {
1541 /* set to A+B */
1542 div_ant_conf.main_lna_conf =
1543 ATH_ANT_DIV_COMB_LNA2;
1544 div_ant_conf.alt_lna_conf =
1545 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1546 } else if (antcomb->rssi_sub >
1547 antcomb->rssi_lna1) {
1548 /* set to A-B */
1549 div_ant_conf.main_lna_conf =
1550 ATH_ANT_DIV_COMB_LNA2;
1551 div_ant_conf.alt_lna_conf =
1552 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1553 } else {
1554 /* set to LNA1 */
1555 div_ant_conf.main_lna_conf =
1556 ATH_ANT_DIV_COMB_LNA2;
1557 div_ant_conf.alt_lna_conf =
1558 ATH_ANT_DIV_COMB_LNA1;
1559 }
1560 } else {
1561 /* use LNA1 as main LNA */
1562 if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
1563 (antcomb->rssi_add > antcomb->rssi_sub)) {
1564 /* set to A+B */
1565 div_ant_conf.main_lna_conf =
1566 ATH_ANT_DIV_COMB_LNA1;
1567 div_ant_conf.alt_lna_conf =
1568 ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
1569 } else if (antcomb->rssi_sub >
1570 antcomb->rssi_lna1) {
1571 /* set to A-B */
1572 div_ant_conf.main_lna_conf =
1573 ATH_ANT_DIV_COMB_LNA1;
1574 div_ant_conf.alt_lna_conf =
1575 ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
1576 } else {
1577 /* set to LNA2 */
1578 div_ant_conf.main_lna_conf =
1579 ATH_ANT_DIV_COMB_LNA1;
1580 div_ant_conf.alt_lna_conf =
1581 ATH_ANT_DIV_COMB_LNA2;
1582 }
1583 }
1584 break;
1585 default:
1586 break;
1587 }
1588 } else {
1589 if (!antcomb->alt_good) {
1590 antcomb->scan_not_start = false;
1591 /* Set alt to another LNA */
1592 if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
1593 div_ant_conf.main_lna_conf =
1594 ATH_ANT_DIV_COMB_LNA2;
1595 div_ant_conf.alt_lna_conf =
1596 ATH_ANT_DIV_COMB_LNA1;
1597 } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
1598 div_ant_conf.main_lna_conf =
1599 ATH_ANT_DIV_COMB_LNA1;
1600 div_ant_conf.alt_lna_conf =
1601 ATH_ANT_DIV_COMB_LNA2;
1602 }
1603 goto div_comb_done;
1604 }
1605 }
1606
1607 ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
1608 main_rssi_avg, alt_rssi_avg,
1609 alt_ratio);
1610
1611 antcomb->quick_scan_cnt++;
1612
1613div_comb_done:
1614 ath_ant_div_conf_fast_divbias(&div_ant_conf);
1615
1616 ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
1617
1618 antcomb->scan_start_time = jiffies;
1619 antcomb->total_pkt_count = 0;
1620 antcomb->main_total_rssi = 0;
1621 antcomb->alt_total_rssi = 0;
1622 antcomb->main_recv_cnt = 0;
1623 antcomb->alt_recv_cnt = 0;
1624}
1625
1079int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1626int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1080{ 1627{
1081 struct ath_buf *bf; 1628 struct ath_buf *bf;
@@ -1099,6 +1646,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1099 u8 rx_status_len = ah->caps.rx_status_len; 1646 u8 rx_status_len = ah->caps.rx_status_len;
1100 u64 tsf = 0; 1647 u64 tsf = 0;
1101 u32 tsf_lower = 0; 1648 u32 tsf_lower = 0;
1649 unsigned long flags;
1102 1650
1103 if (edma) 1651 if (edma)
1104 dma_type = DMA_BIDIRECTIONAL; 1652 dma_type = DMA_BIDIRECTIONAL;
@@ -1189,12 +1737,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1189 bf->bf_buf_addr))) { 1737 bf->bf_buf_addr))) {
1190 dev_kfree_skb_any(requeue_skb); 1738 dev_kfree_skb_any(requeue_skb);
1191 bf->bf_mpdu = NULL; 1739 bf->bf_mpdu = NULL;
1740 bf->bf_buf_addr = 0;
1192 ath_print(common, ATH_DBG_FATAL, 1741 ath_print(common, ATH_DBG_FATAL,
1193 "dma_mapping_error() on RX\n"); 1742 "dma_mapping_error() on RX\n");
1194 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1743 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
1195 break; 1744 break;
1196 } 1745 }
1197 bf->bf_dmacontext = bf->bf_buf_addr;
1198 1746
1199 /* 1747 /*
1200 * change the default rx antenna if rx diversity chooses the 1748 * change the default rx antenna if rx diversity chooses the
@@ -1207,11 +1755,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1207 sc->rx.rxotherant = 0; 1755 sc->rx.rxotherant = 0;
1208 } 1756 }
1209 1757
1758 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1210 if (unlikely(ath9k_check_auto_sleep(sc) || 1759 if (unlikely(ath9k_check_auto_sleep(sc) ||
1211 (sc->ps_flags & (PS_WAIT_FOR_BEACON | 1760 (sc->ps_flags & (PS_WAIT_FOR_BEACON |
1212 PS_WAIT_FOR_CAB | 1761 PS_WAIT_FOR_CAB |
1213 PS_WAIT_FOR_PSPOLL_DATA)))) 1762 PS_WAIT_FOR_PSPOLL_DATA))))
1214 ath_rx_ps(sc, skb); 1763 ath_rx_ps(sc, skb);
1764 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1765
1766 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1767 ath_ant_comb_scan(sc, &rs);
1215 1768
1216 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1769 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
1217 1770
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d01c4adab8d6..42976b0a01c1 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -107,12 +107,6 @@
107#define AR_RXCFG_DMASZ_256B 6 107#define AR_RXCFG_DMASZ_256B 6
108#define AR_RXCFG_DMASZ_512B 7 108#define AR_RXCFG_DMASZ_512B 7
109 109
110#define AR_MIBC 0x0040
111#define AR_MIBC_COW 0x00000001
112#define AR_MIBC_FMC 0x00000002
113#define AR_MIBC_CMC 0x00000004
114#define AR_MIBC_MCS 0x00000008
115
116#define AR_TOPS 0x0044 110#define AR_TOPS 0x0044
117#define AR_TOPS_MASK 0x0000FFFF 111#define AR_TOPS_MASK 0x0000FFFF
118 112
@@ -819,49 +813,23 @@
819 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9160_11)) 813 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9160_11))
820#define AR_SREV_9280(_ah) \ 814#define AR_SREV_9280(_ah) \
821 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280)) 815 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280))
822#define AR_SREV_9280_10_OR_LATER(_ah) \ 816#define AR_SREV_9280_20_OR_LATER(_ah) \
823 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9280)) 817 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9280))
824#define AR_SREV_9280_20(_ah) \ 818#define AR_SREV_9280_20(_ah) \
825 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) && \ 819 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280))
826 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20))
827#define AR_SREV_9280_20_OR_LATER(_ah) \
828 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9280) || \
829 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9280) && \
830 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9280_20)))
831 820
832#define AR_SREV_9285(_ah) \ 821#define AR_SREV_9285(_ah) \
833 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9285)) 822 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9285))
834#define AR_SREV_9285_10_OR_LATER(_ah) \
835 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9285))
836#define AR_SREV_9285_11(_ah) \
837 (AR_SREV_9285(ah) && \
838 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9285_11))
839#define AR_SREV_9285_11_OR_LATER(_ah) \
840 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9285) || \
841 (AR_SREV_9285(ah) && ((_ah)->hw_version.macRev >= \
842 AR_SREV_REVISION_9285_11)))
843#define AR_SREV_9285_12(_ah) \
844 (AR_SREV_9285(ah) && \
845 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9285_12))
846#define AR_SREV_9285_12_OR_LATER(_ah) \ 823#define AR_SREV_9285_12_OR_LATER(_ah) \
847 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9285) || \ 824 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9285))
848 (AR_SREV_9285(ah) && ((_ah)->hw_version.macRev >= \
849 AR_SREV_REVISION_9285_12)))
850 825
851#define AR_SREV_9287(_ah) \ 826#define AR_SREV_9287(_ah) \
852 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287)) 827 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287))
853#define AR_SREV_9287_10_OR_LATER(_ah) \ 828#define AR_SREV_9287_11_OR_LATER(_ah) \
854 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9287)) 829 (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9287))
855#define AR_SREV_9287_10(_ah) \
856 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
857 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_10))
858#define AR_SREV_9287_11(_ah) \ 830#define AR_SREV_9287_11(_ah) \
859 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ 831 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
860 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_11)) 832 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_11))
861#define AR_SREV_9287_11_OR_LATER(_ah) \
862 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9287) || \
863 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
864 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9287_11)))
865#define AR_SREV_9287_12(_ah) \ 833#define AR_SREV_9287_12(_ah) \
866 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \ 834 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9287) && \
867 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_12)) 835 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9287_12))
@@ -885,9 +853,6 @@
885 853
886#define AR_SREV_9300(_ah) \ 854#define AR_SREV_9300(_ah) \
887 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300)) 855 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
888#define AR_SREV_9300_20(_ah) \
889 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
890 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_20))
891#define AR_SREV_9300_20_OR_LATER(_ah) \ 856#define AR_SREV_9300_20_OR_LATER(_ah) \
892 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \ 857 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \
893 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \ 858 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
@@ -1550,11 +1515,6 @@ enum {
1550#define AR_TPC_CHIRP 0x003f0000 1515#define AR_TPC_CHIRP 0x003f0000
1551#define AR_TPC_CHIRP_S 0x16 1516#define AR_TPC_CHIRP_S 0x16
1552 1517
1553#define AR_TFCNT 0x80ec
1554#define AR_RFCNT 0x80f0
1555#define AR_RCCNT 0x80f4
1556#define AR_CCCNT 0x80f8
1557
1558#define AR_QUIET1 0x80fc 1518#define AR_QUIET1 0x80fc
1559#define AR_QUIET1_NEXT_QUIET_S 0 1519#define AR_QUIET1_NEXT_QUIET_S 0
1560#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff 1520#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index fd20241f57d8..ec7cf5ee56bc 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -19,45 +19,36 @@
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21struct ath9k_vif_iter_data { 21struct ath9k_vif_iter_data {
22 int count; 22 const u8 *hw_macaddr;
23 u8 *addr; 23 u8 mask[ETH_ALEN];
24}; 24};
25 25
26static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 26static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
27{ 27{
28 struct ath9k_vif_iter_data *iter_data = data; 28 struct ath9k_vif_iter_data *iter_data = data;
29 u8 *nbuf; 29 int i;
30
31 nbuf = krealloc(iter_data->addr, (iter_data->count + 1) * ETH_ALEN,
32 GFP_ATOMIC);
33 if (nbuf == NULL)
34 return;
35 30
36 memcpy(nbuf + iter_data->count * ETH_ALEN, mac, ETH_ALEN); 31 for (i = 0; i < ETH_ALEN; i++)
37 iter_data->addr = nbuf; 32 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
38 iter_data->count++;
39} 33}
40 34
41void ath9k_set_bssid_mask(struct ieee80211_hw *hw) 35void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
42{ 36{
43 struct ath_wiphy *aphy = hw->priv; 37 struct ath_wiphy *aphy = hw->priv;
44 struct ath_softc *sc = aphy->sc; 38 struct ath_softc *sc = aphy->sc;
45 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 39 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
46 struct ath9k_vif_iter_data iter_data; 40 struct ath9k_vif_iter_data iter_data;
47 int i, j; 41 int i;
48 u8 mask[ETH_ALEN];
49 42
50 /* 43 /*
51 * Add primary MAC address even if it is not in active use since it 44 * Use the hardware MAC address as reference, the hardware uses it
52 * will be configured to the hardware as the starting point and the 45 * together with the BSSID mask when matching addresses.
53 * BSSID mask will need to be changed if another address is active.
54 */ 46 */
55 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC); 47 iter_data.hw_macaddr = common->macaddr;
56 if (iter_data.addr) { 48 memset(&iter_data.mask, 0xff, ETH_ALEN);
57 memcpy(iter_data.addr, common->macaddr, ETH_ALEN); 49
58 iter_data.count = 1; 50 if (vif)
59 } else 51 ath9k_vif_iter(&iter_data, vif->addr, vif);
60 iter_data.count = 0;
61 52
62 /* Get list of all active MAC addresses */ 53 /* Get list of all active MAC addresses */
63 spin_lock_bh(&sc->wiphy_lock); 54 spin_lock_bh(&sc->wiphy_lock);
@@ -71,31 +62,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
71 } 62 }
72 spin_unlock_bh(&sc->wiphy_lock); 63 spin_unlock_bh(&sc->wiphy_lock);
73 64
74 /* Generate an address mask to cover all active addresses */ 65 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
75 memset(mask, 0, ETH_ALEN);
76 for (i = 0; i < iter_data.count; i++) {
77 u8 *a1 = iter_data.addr + i * ETH_ALEN;
78 for (j = i + 1; j < iter_data.count; j++) {
79 u8 *a2 = iter_data.addr + j * ETH_ALEN;
80 mask[0] |= a1[0] ^ a2[0];
81 mask[1] |= a1[1] ^ a2[1];
82 mask[2] |= a1[2] ^ a2[2];
83 mask[3] |= a1[3] ^ a2[3];
84 mask[4] |= a1[4] ^ a2[4];
85 mask[5] |= a1[5] ^ a2[5];
86 }
87 }
88
89 kfree(iter_data.addr);
90
91 /* Invert the mask and configure hardware */
92 common->bssidmask[0] = ~mask[0];
93 common->bssidmask[1] = ~mask[1];
94 common->bssidmask[2] = ~mask[2];
95 common->bssidmask[3] = ~mask[3];
96 common->bssidmask[4] = ~mask[4];
97 common->bssidmask[5] = ~mask[5];
98
99 ath_hw_setbssidmask(common); 66 ath_hw_setbssidmask(common);
100} 67}
101 68
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 45fe9cac7971..93a8bda09c25 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -124,55 +124,11 @@ void ath9k_wmi_tasklet(unsigned long data)
124{ 124{
125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
126 struct ath_common *common = ath9k_hw_common(priv->ah); 126 struct ath_common *common = ath9k_hw_common(priv->ah);
127 struct wmi_cmd_hdr *hdr;
128 struct wmi_swba *swba_hdr;
129 enum wmi_event_id event;
130 struct sk_buff *skb;
131 void *wmi_event;
132 unsigned long flags;
133#ifdef CONFIG_ATH9K_HTC_DEBUGFS
134 __be32 txrate;
135#endif
136 127
137 spin_lock_irqsave(&priv->wmi->wmi_lock, flags); 128 ath_print(common, ATH_DBG_WMI, "SWBA Event received\n");
138 skb = priv->wmi->wmi_skb;
139 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
140 129
141 hdr = (struct wmi_cmd_hdr *) skb->data; 130 ath9k_htc_swba(priv, priv->wmi->beacon_pending);
142 event = be16_to_cpu(hdr->command_id);
143 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
144 131
145 ath_print(common, ATH_DBG_WMI,
146 "WMI Event: 0x%x\n", event);
147
148 switch (event) {
149 case WMI_TGT_RDY_EVENTID:
150 break;
151 case WMI_SWBA_EVENTID:
152 swba_hdr = (struct wmi_swba *) wmi_event;
153 ath9k_htc_swba(priv, swba_hdr->beacon_pending);
154 break;
155 case WMI_FATAL_EVENTID:
156 break;
157 case WMI_TXTO_EVENTID:
158 break;
159 case WMI_BMISS_EVENTID:
160 break;
161 case WMI_WLAN_TXCOMP_EVENTID:
162 break;
163 case WMI_DELBA_EVENTID:
164 break;
165 case WMI_TXRATE_EVENTID:
166#ifdef CONFIG_ATH9K_HTC_DEBUGFS
167 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
168 priv->debug.txrate = be32_to_cpu(txrate);
169#endif
170 break;
171 default:
172 break;
173 }
174
175 kfree_skb(skb);
176} 132}
177 133
178static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb) 134static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
@@ -191,6 +147,10 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
191 struct wmi *wmi = (struct wmi *) priv; 147 struct wmi *wmi = (struct wmi *) priv;
192 struct wmi_cmd_hdr *hdr; 148 struct wmi_cmd_hdr *hdr;
193 u16 cmd_id; 149 u16 cmd_id;
150 void *wmi_event;
151#ifdef CONFIG_ATH9K_HTC_DEBUGFS
152 __be32 txrate;
153#endif
194 154
195 if (unlikely(wmi->stopped)) 155 if (unlikely(wmi->stopped))
196 goto free_skb; 156 goto free_skb;
@@ -199,10 +159,22 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
199 cmd_id = be16_to_cpu(hdr->command_id); 159 cmd_id = be16_to_cpu(hdr->command_id);
200 160
201 if (cmd_id & 0x1000) { 161 if (cmd_id & 0x1000) {
202 spin_lock(&wmi->wmi_lock); 162 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
203 wmi->wmi_skb = skb; 163 switch (cmd_id) {
204 spin_unlock(&wmi->wmi_lock); 164 case WMI_SWBA_EVENTID:
205 tasklet_schedule(&wmi->drv_priv->wmi_tasklet); 165 wmi->beacon_pending = *(u8 *)wmi_event;
166 tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
167 break;
168 case WMI_TXRATE_EVENTID:
169#ifdef CONFIG_ATH9K_HTC_DEBUGFS
170 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
171 wmi->drv_priv->debug.txrate = be32_to_cpu(txrate);
172#endif
173 break;
174 default:
175 break;
176 }
177 kfree_skb(skb);
206 return; 178 return;
207 } 179 }
208 180
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index a0bf857625df..ac61074af8ac 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -31,10 +31,6 @@ struct wmi_cmd_hdr {
31 __be16 seq_no; 31 __be16 seq_no;
32} __packed; 32} __packed;
33 33
34struct wmi_swba {
35 u8 beacon_pending;
36} __packed;
37
38enum wmi_cmd_id { 34enum wmi_cmd_id {
39 WMI_ECHO_CMDID = 0x0001, 35 WMI_ECHO_CMDID = 0x0001,
40 WMI_ACCESS_MEMORY_CMDID, 36 WMI_ACCESS_MEMORY_CMDID,
@@ -104,7 +100,7 @@ struct wmi {
104 u32 cmd_rsp_len; 100 u32 cmd_rsp_len;
105 bool stopped; 101 bool stopped;
106 102
107 struct sk_buff *wmi_skb; 103 u8 beacon_pending;
108 spinlock_t wmi_lock; 104 spinlock_t wmi_lock;
109 105
110 atomic_t mwrite_cnt; 106 atomic_t mwrite_cnt;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 457f07692ac7..d077186da870 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -61,6 +61,8 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
61 struct ath_tx_status *ts, int txok); 61 struct ath_tx_status *ts, int txok);
62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
63 int nbad, int txok, bool update_rc); 63 int nbad, int txok, bool update_rc);
64static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
65 int seqno);
64 66
65enum { 67enum {
66 MCS_HT20, 68 MCS_HT20,
@@ -143,18 +145,23 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
143 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 145 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
144 struct ath_buf *bf; 146 struct ath_buf *bf;
145 struct list_head bf_head; 147 struct list_head bf_head;
146 INIT_LIST_HEAD(&bf_head); 148 struct ath_tx_status ts;
147 149
148 WARN_ON(!tid->paused); 150 INIT_LIST_HEAD(&bf_head);
149 151
152 memset(&ts, 0, sizeof(ts));
150 spin_lock_bh(&txq->axq_lock); 153 spin_lock_bh(&txq->axq_lock);
151 tid->paused = false;
152 154
153 while (!list_empty(&tid->buf_q)) { 155 while (!list_empty(&tid->buf_q)) {
154 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 156 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
155 BUG_ON(bf_isretried(bf));
156 list_move_tail(&bf->list, &bf_head); 157 list_move_tail(&bf->list, &bf_head);
157 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 158
159 if (bf_isretried(bf)) {
160 ath_tx_update_baw(sc, tid, bf->bf_seqno);
161 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
162 } else {
163 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
164 }
158 } 165 }
159 166
160 spin_unlock_bh(&txq->axq_lock); 167 spin_unlock_bh(&txq->axq_lock);
@@ -168,9 +175,9 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
168 index = ATH_BA_INDEX(tid->seq_start, seqno); 175 index = ATH_BA_INDEX(tid->seq_start, seqno);
169 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 176 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
170 177
171 tid->tx_buf[cindex] = NULL; 178 __clear_bit(cindex, tid->tx_buf);
172 179
173 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { 180 while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
174 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 181 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
175 INCR(tid->baw_head, ATH_TID_MAX_BUFS); 182 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
176 } 183 }
@@ -186,9 +193,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
186 193
187 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 194 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
188 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 195 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
189 196 __set_bit(cindex, tid->tx_buf);
190 BUG_ON(tid->tx_buf[cindex] != NULL);
191 tid->tx_buf[cindex] = bf;
192 197
193 if (index >= ((tid->baw_tail - tid->baw_head) & 198 if (index >= ((tid->baw_tail - tid->baw_head) &
194 (ATH_TID_MAX_BUFS - 1))) { 199 (ATH_TID_MAX_BUFS - 1))) {
@@ -289,7 +294,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
289 tbf->bf_buf_addr = bf->bf_buf_addr; 294 tbf->bf_buf_addr = bf->bf_buf_addr;
290 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); 295 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
291 tbf->bf_state = bf->bf_state; 296 tbf->bf_state = bf->bf_state;
292 tbf->bf_dmacontext = bf->bf_dmacontext;
293 297
294 return tbf; 298 return tbf;
295} 299}
@@ -312,6 +316,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
312 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 316 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
313 bool rc_update = true; 317 bool rc_update = true;
314 struct ieee80211_tx_rate rates[4]; 318 struct ieee80211_tx_rate rates[4];
319 int nframes;
315 320
316 skb = bf->bf_mpdu; 321 skb = bf->bf_mpdu;
317 hdr = (struct ieee80211_hdr *)skb->data; 322 hdr = (struct ieee80211_hdr *)skb->data;
@@ -320,11 +325,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
320 hw = bf->aphy->hw; 325 hw = bf->aphy->hw;
321 326
322 memcpy(rates, tx_info->control.rates, sizeof(rates)); 327 memcpy(rates, tx_info->control.rates, sizeof(rates));
328 nframes = bf->bf_nframes;
323 329
324 rcu_read_lock(); 330 rcu_read_lock();
325 331
326 /* XXX: use ieee80211_find_sta! */ 332 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
327 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
328 if (!sta) { 333 if (!sta) {
329 rcu_read_unlock(); 334 rcu_read_unlock();
330 335
@@ -337,7 +342,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
337 !bf->bf_stale || bf_next != NULL) 342 !bf->bf_stale || bf_next != NULL)
338 list_move_tail(&bf->list, &bf_head); 343 list_move_tail(&bf->list, &bf_head);
339 344
340 ath_tx_rc_status(bf, ts, 0, 0, false); 345 ath_tx_rc_status(bf, ts, 1, 0, false);
341 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 346 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
342 0, 0); 347 0, 0);
343 348
@@ -431,7 +436,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
431 list_move_tail(&bf->list, &bf_head); 436 list_move_tail(&bf->list, &bf_head);
432 } 437 }
433 438
434 if (!txpending) { 439 if (!txpending || (tid->state & AGGR_CLEANUP)) {
435 /* 440 /*
436 * complete the acked-ones/xretried ones; update 441 * complete the acked-ones/xretried ones; update
437 * block-ack window 442 * block-ack window
@@ -442,6 +447,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
442 447
443 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 448 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
444 memcpy(tx_info->control.rates, rates, sizeof(rates)); 449 memcpy(tx_info->control.rates, rates, sizeof(rates));
450 bf->bf_nframes = nframes;
445 ath_tx_rc_status(bf, ts, nbad, txok, true); 451 ath_tx_rc_status(bf, ts, nbad, txok, true);
446 rc_update = false; 452 rc_update = false;
447 } else { 453 } else {
@@ -510,15 +516,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
510 } 516 }
511 517
512 if (tid->state & AGGR_CLEANUP) { 518 if (tid->state & AGGR_CLEANUP) {
519 ath_tx_flush_tid(sc, tid);
520
513 if (tid->baw_head == tid->baw_tail) { 521 if (tid->baw_head == tid->baw_tail) {
514 tid->state &= ~AGGR_ADDBA_COMPLETE; 522 tid->state &= ~AGGR_ADDBA_COMPLETE;
515 tid->state &= ~AGGR_CLEANUP; 523 tid->state &= ~AGGR_CLEANUP;
516
517 /* send buffered frames as singles */
518 ath_tx_flush_tid(sc, tid);
519 } 524 }
520 rcu_read_unlock();
521 return;
522 } 525 }
523 526
524 rcu_read_unlock(); 527 rcu_read_unlock();
@@ -785,17 +788,23 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
785 status != ATH_AGGR_BAW_CLOSED); 788 status != ATH_AGGR_BAW_CLOSED);
786} 789}
787 790
788void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 791int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
789 u16 tid, u16 *ssn) 792 u16 tid, u16 *ssn)
790{ 793{
791 struct ath_atx_tid *txtid; 794 struct ath_atx_tid *txtid;
792 struct ath_node *an; 795 struct ath_node *an;
793 796
794 an = (struct ath_node *)sta->drv_priv; 797 an = (struct ath_node *)sta->drv_priv;
795 txtid = ATH_AN_2_TID(an, tid); 798 txtid = ATH_AN_2_TID(an, tid);
799
800 if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
801 return -EAGAIN;
802
796 txtid->state |= AGGR_ADDBA_PROGRESS; 803 txtid->state |= AGGR_ADDBA_PROGRESS;
797 txtid->paused = true; 804 txtid->paused = true;
798 *ssn = txtid->seq_start; 805 *ssn = txtid->seq_start;
806
807 return 0;
799} 808}
800 809
801void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 810void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
@@ -803,12 +812,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
803 struct ath_node *an = (struct ath_node *)sta->drv_priv; 812 struct ath_node *an = (struct ath_node *)sta->drv_priv;
804 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 813 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
805 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 814 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
806 struct ath_tx_status ts;
807 struct ath_buf *bf;
808 struct list_head bf_head;
809
810 memset(&ts, 0, sizeof(ts));
811 INIT_LIST_HEAD(&bf_head);
812 815
813 if (txtid->state & AGGR_CLEANUP) 816 if (txtid->state & AGGR_CLEANUP)
814 return; 817 return;
@@ -818,31 +821,22 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
818 return; 821 return;
819 } 822 }
820 823
821 /* drop all software retried frames and mark this TID */
822 spin_lock_bh(&txq->axq_lock); 824 spin_lock_bh(&txq->axq_lock);
823 txtid->paused = true; 825 txtid->paused = true;
824 while (!list_empty(&txtid->buf_q)) {
825 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
826 if (!bf_isretried(bf)) {
827 /*
828 * NB: it's based on the assumption that
829 * software retried frame will always stay
830 * at the head of software queue.
831 */
832 break;
833 }
834 list_move_tail(&bf->list, &bf_head);
835 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
836 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
837 }
838 spin_unlock_bh(&txq->axq_lock);
839 826
840 if (txtid->baw_head != txtid->baw_tail) { 827 /*
828 * If frames are still being transmitted for this TID, they will be
829 * cleaned up during tx completion. To prevent race conditions, this
830 * TID can only be reused after all in-progress subframes have been
831 * completed.
832 */
833 if (txtid->baw_head != txtid->baw_tail)
841 txtid->state |= AGGR_CLEANUP; 834 txtid->state |= AGGR_CLEANUP;
842 } else { 835 else
843 txtid->state &= ~AGGR_ADDBA_COMPLETE; 836 txtid->state &= ~AGGR_ADDBA_COMPLETE;
844 ath_tx_flush_tid(sc, txtid); 837 spin_unlock_bh(&txq->axq_lock);
845 } 838
839 ath_tx_flush_tid(sc, txtid);
846} 840}
847 841
848void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 842void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
@@ -862,20 +856,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
862 } 856 }
863} 857}
864 858
865bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
866{
867 struct ath_atx_tid *txtid;
868
869 if (!(sc->sc_flags & SC_OP_TXAGGR))
870 return false;
871
872 txtid = ATH_AN_2_TID(an, tidno);
873
874 if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
875 return true;
876 return false;
877}
878
879/********************/ 859/********************/
880/* Queue Management */ 860/* Queue Management */
881/********************/ 861/********************/
@@ -1659,24 +1639,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1659 1639
1660 bf->bf_mpdu = skb; 1640 bf->bf_mpdu = skb;
1661 1641
1662 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data, 1642 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1663 skb->len, DMA_TO_DEVICE); 1643 skb->len, DMA_TO_DEVICE);
1664 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { 1644 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
1665 bf->bf_mpdu = NULL; 1645 bf->bf_mpdu = NULL;
1646 bf->bf_buf_addr = 0;
1666 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1647 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1667 "dma_mapping_error() on TX\n"); 1648 "dma_mapping_error() on TX\n");
1668 return -ENOMEM; 1649 return -ENOMEM;
1669 } 1650 }
1670 1651
1671 bf->bf_buf_addr = bf->bf_dmacontext;
1672
1673 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1674 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1675 bf->bf_isnullfunc = true;
1676 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1677 } else
1678 bf->bf_isnullfunc = false;
1679
1680 bf->bf_tx_aborted = false; 1652 bf->bf_tx_aborted = false;
1681 1653
1682 return 0; 1654 return 0;
@@ -1940,7 +1912,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1940 tx_flags |= ATH_TX_XRETRY; 1912 tx_flags |= ATH_TX_XRETRY;
1941 } 1913 }
1942 1914
1943 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1915 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
1916 bf->bf_buf_addr = 0;
1944 1917
1945 if (bf->bf_state.bfs_paprd) { 1918 if (bf->bf_state.bfs_paprd) {
1946 if (time_after(jiffies, 1919 if (time_after(jiffies,
@@ -1950,9 +1923,13 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1950 else 1923 else
1951 complete(&sc->paprd_complete); 1924 complete(&sc->paprd_complete);
1952 } else { 1925 } else {
1953 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1954 ath_debug_stat_tx(sc, txq, bf, ts); 1926 ath_debug_stat_tx(sc, txq, bf, ts);
1927 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1955 } 1928 }
1929 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1930 * accidentally reference it later.
1931 */
1932 bf->bf_mpdu = NULL;
1956 1933
1957 /* 1934 /*
1958 * Return the list of ath_buf of this mpdu to free queue 1935 * Return the list of ath_buf of this mpdu to free queue
@@ -2008,9 +1985,15 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2008 1985
2009 if (ts->ts_status & ATH9K_TXERR_FILT) 1986 if (ts->ts_status & ATH9K_TXERR_FILT)
2010 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1987 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2011 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) 1988 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
2012 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 1989 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2013 1990
1991 BUG_ON(nbad > bf->bf_nframes);
1992
1993 tx_info->status.ampdu_len = bf->bf_nframes;
1994 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
1995 }
1996
2014 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 1997 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2015 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 1998 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
2016 if (ieee80211_is_data(hdr->frame_control)) { 1999 if (ieee80211_is_data(hdr->frame_control)) {
@@ -2020,8 +2003,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2020 if ((ts->ts_status & ATH9K_TXERR_XRETRY) || 2003 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
2021 (ts->ts_status & ATH9K_TXERR_FIFO)) 2004 (ts->ts_status & ATH9K_TXERR_FIFO))
2022 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 2005 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
2023 tx_info->status.ampdu_len = bf->bf_nframes;
2024 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
2025 } 2006 }
2026 } 2007 }
2027 2008
@@ -2104,18 +2085,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2104 } 2085 }
2105 2086
2106 /* 2087 /*
2107 * We now know the nullfunc frame has been ACKed so we
2108 * can disable RX.
2109 */
2110 if (bf->bf_isnullfunc &&
2111 (ts.ts_status & ATH9K_TX_ACKED)) {
2112 if ((sc->ps_flags & PS_ENABLED))
2113 ath9k_enable_ps(sc);
2114 else
2115 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2116 }
2117
2118 /*
2119 * Remove ath_buf's of the same transmit unit from txq, 2088 * Remove ath_buf's of the same transmit unit from txq,
2120 * however leave the last descriptor back as the holding 2089 * however leave the last descriptor back as the holding
2121 * descriptor for hw. 2090 * descriptor for hw.
@@ -2143,7 +2112,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2143 */ 2112 */
2144 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2113 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2145 bf->bf_state.bf_type |= BUF_XRETRY; 2114 bf->bf_state.bf_type |= BUF_XRETRY;
2146 ath_tx_rc_status(bf, &ts, 0, txok, true); 2115 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
2147 } 2116 }
2148 2117
2149 if (bf_isampdu(bf)) 2118 if (bf_isampdu(bf))
@@ -2258,21 +2227,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2258 2227
2259 txok = !(txs.ts_status & ATH9K_TXERR_MASK); 2228 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2260 2229
2261 /*
2262 * Make sure null func frame is acked before configuring
2263 * hw into ps mode.
2264 */
2265 if (bf->bf_isnullfunc && txok) {
2266 if ((sc->ps_flags & PS_ENABLED))
2267 ath9k_enable_ps(sc);
2268 else
2269 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2270 }
2271
2272 if (!bf_isampdu(bf)) { 2230 if (!bf_isampdu(bf)) {
2273 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2231 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2274 bf->bf_state.bf_type |= BUF_XRETRY; 2232 bf->bf_state.bf_type |= BUF_XRETRY;
2275 ath_tx_rc_status(bf, &txs, 0, txok, true); 2233 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
2276 } 2234 }
2277 2235
2278 if (bf_isampdu(bf)) 2236 if (bf_isampdu(bf))
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
new file mode 100644
index 000000000000..2d1b821b440d
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -0,0 +1,41 @@
1config CARL9170
2 tristate "Linux Community AR9170 802.11n USB support"
3 depends on USB && MAC80211 && EXPERIMENTAL
4 select FW_LOADER
5 select CRC32
6 help
7 This is another driver for the Atheros "otus" 802.11n USB devices.
8
9 This driver provides more features than the original,
10 but it needs a special firmware (carl9170-1.fw) to do that.
11
12 The firmware can be downloaded from our wiki here:
13 <http://wireless.kernel.org/en/users/Drivers/carl9170>
14
15 If you choose to build a module, it'll be called carl9170.
16
17config CARL9170_LEDS
18 bool "SoftLED Support"
19 depends on CARL9170
20 select MAC80211_LEDS
21 select LEDS_CLASS
22 select NEW_LEDS
23 default y
24 help
25 This option is necessary, if you want your device' LEDs to blink
26
27 Say Y, unless you need the LEDs for firmware debugging.
28
29config CARL9170_DEBUGFS
30 bool "DebugFS Support"
31 depends on CARL9170 && DEBUG_FS && MAC80211_DEBUGFS
32 default n
33 help
34 Export several driver and device internals to user space.
35
36 Say N.
37
38config CARL9170_WPC
39 bool
40 depends on CARL9170 && (INPUT = y || INPUT = CARL9170)
41 default y
diff --git a/drivers/net/wireless/ath/carl9170/Makefile b/drivers/net/wireless/ath/carl9170/Makefile
new file mode 100644
index 000000000000..f64ed76af8ad
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/Makefile
@@ -0,0 +1,4 @@
1carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
2carl9170-$(CONFIG_CARL9170_DEBUGFS) += debug.o
3
4obj-$(CONFIG_CARL9170) += carl9170.o
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
new file mode 100644
index 000000000000..6cf0c9ef47aa
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -0,0 +1,628 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * Driver specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __CARL9170_H
40#define __CARL9170_H
41
42#include <linux/kernel.h>
43#include <linux/firmware.h>
44#include <linux/completion.h>
45#include <linux/spinlock.h>
46#include <net/cfg80211.h>
47#include <net/mac80211.h>
48#include <linux/usb.h>
49#ifdef CONFIG_CARL9170_LEDS
50#include <linux/leds.h>
51#endif /* CONFIG_CARL170_LEDS */
52#ifdef CONFIG_CARL9170_WPC
53#include <linux/input.h>
54#endif /* CONFIG_CARL9170_WPC */
55#include "eeprom.h"
56#include "wlan.h"
57#include "hw.h"
58#include "fwdesc.h"
59#include "fwcmd.h"
60#include "../regd.h"
61
62#ifdef CONFIG_CARL9170_DEBUGFS
63#include "debug.h"
64#endif /* CONFIG_CARL9170_DEBUGFS */
65
66#define CARL9170FW_NAME "carl9170-1.fw"
67
68#define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1)
69
70enum carl9170_rf_init_mode {
71 CARL9170_RFI_NONE,
72 CARL9170_RFI_WARM,
73 CARL9170_RFI_COLD,
74};
75
76#define CARL9170_MAX_RX_BUFFER_SIZE 8192
77
78enum carl9170_device_state {
79 CARL9170_UNKNOWN_STATE,
80 CARL9170_STOPPED,
81 CARL9170_IDLE,
82 CARL9170_STARTED,
83};
84
85#define CARL9170_NUM_TID 16
86#define WME_BA_BMP_SIZE 64
87#define CARL9170_TX_USER_RATE_TRIES 3
88
89#define WME_AC_BE 2
90#define WME_AC_BK 3
91#define WME_AC_VI 1
92#define WME_AC_VO 0
93
94#define TID_TO_WME_AC(_tid) \
95 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
96 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
97 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
98 WME_AC_VO)
99
100#define SEQ_DIFF(_start, _seq) \
101 (((_start) - (_seq)) & 0x0fff)
102#define SEQ_PREV(_seq) \
103 (((_seq) - 1) & 0x0fff)
104#define SEQ_NEXT(_seq) \
105 (((_seq) + 1) & 0x0fff)
106#define BAW_WITHIN(_start, _bawsz, _seqno) \
107 ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
108
109enum carl9170_tid_state {
110 CARL9170_TID_STATE_INVALID,
111 CARL9170_TID_STATE_KILLED,
112 CARL9170_TID_STATE_SHUTDOWN,
113 CARL9170_TID_STATE_SUSPEND,
114 CARL9170_TID_STATE_PROGRESS,
115 CARL9170_TID_STATE_IDLE,
116 CARL9170_TID_STATE_XMIT,
117};
118
119#define CARL9170_BAW_BITS (2 * WME_BA_BMP_SIZE)
120#define CARL9170_BAW_SIZE (BITS_TO_LONGS(CARL9170_BAW_BITS))
121#define CARL9170_BAW_LEN (DIV_ROUND_UP(CARL9170_BAW_BITS, BITS_PER_BYTE))
122
123struct carl9170_sta_tid {
124 /* must be the first entry! */
125 struct list_head list;
126
127 /* temporary list for RCU unlink procedure */
128 struct list_head tmp_list;
129
130 /* lock for the following data structures */
131 spinlock_t lock;
132
133 unsigned int counter;
134 enum carl9170_tid_state state;
135 u8 tid; /* TID number ( 0 - 15 ) */
136 u16 max; /* max. AMPDU size */
137
138 u16 snx; /* awaiting _next_ frame */
139 u16 hsn; /* highest _queued_ sequence */
140 u16 bsn; /* base of the tx/agg bitmap */
141 unsigned long bitmap[CARL9170_BAW_SIZE];
142
143 /* Preaggregation reorder queue */
144 struct sk_buff_head queue;
145};
146
147#define CARL9170_QUEUE_TIMEOUT 256
148#define CARL9170_BUMP_QUEUE 1000
149#define CARL9170_TX_TIMEOUT 2500
150#define CARL9170_JANITOR_DELAY 128
151#define CARL9170_QUEUE_STUCK_TIMEOUT 5500
152
153#define CARL9170_NUM_TX_AGG_MAX 30
154
155/*
156 * Tradeoff between stability/latency and speed.
157 *
158 * AR9170_TXQ_DEPTH is devised by dividing the amount of available
159 * tx buffers with the size of a full ethernet frame + overhead.
160 *
161 * Naturally: The higher the limit, the faster the device CAN send.
162 * However, even a slight over-commitment at the wrong time and the
163 * hardware is doomed to send all already-queued frames at suboptimal
164 * rates. This in turn leads to an enourmous amount of unsuccessful
165 * retries => Latency goes up, whereas the throughput goes down. CRASH!
166 */
167#define CARL9170_NUM_TX_LIMIT_HARD ((AR9170_TXQ_DEPTH * 3) / 2)
168#define CARL9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH)
169
/*
 * Per-AC TX queue accounting (one instance per hardware queue in
 * ar->tx_stats[], guarded by ar->tx_stats_lock).
 */
struct carl9170_tx_queue_stats {
	unsigned int count;	/* total number of frames accounted */
	unsigned int limit;	/* queue length limit */
	unsigned int len;	/* current queue length */
};
175
/* Driver-side slot for one virtual interface (ar->vif_priv[id]). */
struct carl9170_vif {
	unsigned int id;	/* index of this entry in ar->vif_priv[] */
	struct ieee80211_vif *vif;
};
180
/*
 * Per-vif private data, embedded in ieee80211_vif::drv_priv
 * (see carl9170_get_vif()).  Linked into ar->vif_list.
 */
struct carl9170_vif_info {
	struct list_head list;
	bool active;		/* the "main" vif is the first active entry */
	unsigned int id;
	struct sk_buff *beacon;
	bool enable_beacon;
};
188
189#define AR9170_NUM_RX_URBS 16
190#define AR9170_NUM_RX_URBS_MUL 2
191#define AR9170_NUM_TX_URBS 8
192#define AR9170_NUM_RX_URBS_POOL (AR9170_NUM_RX_URBS_MUL * AR9170_NUM_RX_URBS)
193
/* Optional hardware feature flags (bitmask, stored in ar->features). */
enum carl9170_device_features {
	CARL9170_WPS_BUTTON		= BIT(0),
	CARL9170_ONE_LED		= BIT(1),
};
198
199#ifdef CONFIG_CARL9170_LEDS
200struct ar9170;
201
/* One LED as exposed through the LED class-device framework. */
struct carl9170_led {
	struct ar9170 *ar;		/* back-pointer to the owning device */
	struct led_classdev l;
	char name[32];
	unsigned int toggled;
	bool last_state;
	bool registered;		/* led_classdev registration succeeded */
};
210#endif /* CONFIG_CARL9170_LEDS */
211
/*
 * Reasons handed to carl9170_restart(); the last one is remembered in
 * ar->last_reason.  __CARL9170_RR_LAST is a sentinel, not a reason.
 */
enum carl9170_restart_reasons {
	CARL9170_RR_NO_REASON = 0,
	CARL9170_RR_FATAL_FIRMWARE_ERROR,
	CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
	CARL9170_RR_WATCHDOG,
	CARL9170_RR_STUCK_TX,
	CARL9170_RR_SLOW_SYSTEM,
	CARL9170_RR_COMMAND_TIMEOUT,
	CARL9170_RR_TOO_MANY_PHY_ERRORS,
	CARL9170_RR_LOST_RSP,
	CARL9170_RR_INVALID_RSP,
	CARL9170_RR_USER_REQUEST,

	__CARL9170_RR_LAST,
};
227
/* ERP protection policy (stored in ar->erp_mode). */
enum carl9170_erp_modes {
	CARL9170_ERP_INVALID,
	CARL9170_ERP_AUTO,
	CARL9170_ERP_MAC80211,	/* follow mac80211's decision */
	CARL9170_ERP_OFF,
	CARL9170_ERP_CTS,	/* CTS-to-self */
	CARL9170_ERP_RTS,	/* RTS/CTS handshake */
	__CARL9170_ERP_NUM,	/* sentinel */
};
237
/*
 * Main per-device state; one instance per USB dongle
 * (allocated by carl9170_alloc()).
 *
 * Locking (as far as visible here): ar->mutex serializes
 * configuration paths, state_lock guards the device state machine
 * (see carl9170_set_state()), and the individual spinlocks below
 * guard the sections they are declared in.
 */
struct ar9170 {
	struct ath_common common;
	struct ieee80211_hw *hw;
	struct mutex mutex;
	enum carl9170_device_state state;
	spinlock_t state_lock;		/* guards ->state */
	enum carl9170_restart_reasons last_reason;
	bool registered;

	/* USB */
	struct usb_device *udev;
	struct usb_interface *intf;
	struct usb_anchor rx_anch;
	struct usb_anchor rx_work;
	struct usb_anchor rx_pool;
	struct usb_anchor tx_wait;
	struct usb_anchor tx_anch;
	struct usb_anchor tx_cmd;
	struct usb_anchor tx_err;
	struct tasklet_struct usb_tasklet;
	atomic_t tx_cmd_urbs;
	atomic_t tx_anch_urbs;
	atomic_t rx_anch_urbs;
	atomic_t rx_work_urbs;
	atomic_t rx_pool_urbs;
	kernel_ulong_t features;	/* enum carl9170_device_features */

	/* firmware settings */
	struct completion fw_load_wait;
	struct completion fw_boot_wait;
	struct {
		const struct carl9170fw_desc_head *desc;
		const struct firmware *fw;
		unsigned int offset;
		unsigned int address;
		unsigned int cmd_bufs;
		unsigned int api_version;
		unsigned int vif_num;
		unsigned int err_counter;
		unsigned int bug_counter;
		u32 beacon_addr;
		unsigned int beacon_max_len;
		bool rx_stream;
		bool tx_stream;
		bool rx_filter;
		unsigned int mem_blocks;
		unsigned int mem_block_size;
		unsigned int rx_size;
	} fw;

	/* reset / stuck frames/queue detection */
	struct work_struct restart_work;
	unsigned int restart_counter;
	unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
	unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
	bool needs_full_reset;
	atomic_t pending_restarts;

	/* interface mode settings */
	struct list_head vif_list;
	unsigned long vif_bitmap;	/* allocated ids, see vif_priv[] */
	unsigned int vifs;
	struct carl9170_vif vif_priv[AR9170_MAX_VIRTUAL_MAC];

	/* beaconing */
	spinlock_t beacon_lock;
	unsigned int global_pretbtt;
	unsigned int global_beacon_int;
	struct carl9170_vif_info *beacon_iter;
	unsigned int beacon_enabled;

	/* cryptographic engine */
	u64 usedkeys;
	bool rx_software_decryption;
	bool disable_offload;

	/* filter settings */
	u64 cur_mc_hash;
	u32 cur_filter;
	unsigned int filter_state;
	unsigned int rx_filter_caps;
	bool sniffer_enabled;

	/* MAC */
	enum carl9170_erp_modes erp_mode;

	/* PHY */
	struct ieee80211_channel *channel;
	int noise[4];
	unsigned int chan_fail;
	unsigned int total_chan_fail;
	u8 heavy_clip;
	u8 ht_settings;

	/* power calibration data */
	u8 power_5G_leg[4];
	u8 power_2G_cck[4];
	u8 power_2G_ofdm[4];
	u8 power_5G_ht20[8];
	u8 power_5G_ht40[8];
	u8 power_2G_ht20[8];
	u8 power_2G_ht40[8];

#ifdef CONFIG_CARL9170_LEDS
	/* LED */
	struct delayed_work led_work;
	struct carl9170_led leds[AR9170_NUM_LEDS];
#endif /* CONFIG_CARL9170_LEDS */

	/* qos queue settings */
	spinlock_t tx_stats_lock;	/* guards tx_stats[] */
	struct carl9170_tx_queue_stats tx_stats[__AR9170_NUM_TXQ];
	struct ieee80211_tx_queue_params edcf[5];
	struct completion tx_flush;

	/* CMD */
	int cmd_seq;
	int readlen;
	u8 *readbuf;
	spinlock_t cmd_lock;
	struct completion cmd_wait;
	union {
		/* shared command/response scratch buffer, see cmd.h's
		 * carl9170_regwrite_*() macros */
		__le32 cmd_buf[PAYLOAD_MAX + 1];
		struct carl9170_cmd cmd;
		struct carl9170_rsp rsp;
	};

	/* statistics */
	unsigned int tx_dropped;
	unsigned int tx_ack_failures;
	unsigned int tx_fcs_errors;
	unsigned int rx_dropped;

	/* EEPROM */
	struct ar9170_eeprom eeprom;

	/* tx queuing */
	struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
	struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
	struct delayed_work tx_janitor;
	unsigned long tx_janitor_last_run;
	bool tx_schedule;

	/* tx ampdu */
	struct work_struct ampdu_work;
	spinlock_t tx_ampdu_list_lock;
	struct carl9170_sta_tid *tx_ampdu_iter;
	struct list_head tx_ampdu_list;
	atomic_t tx_ampdu_upload;
	atomic_t tx_ampdu_scheduler;
	atomic_t tx_total_pending;
	atomic_t tx_total_queued;
	unsigned int tx_ampdu_list_len;
	int current_density;
	int current_factor;
	bool tx_ampdu_schedule;

	/* internal memory management */
	spinlock_t mem_lock;		/* guards mem_bitmap */
	unsigned long *mem_bitmap;
	atomic_t mem_free_blocks;
	atomic_t mem_allocs;

	/* rxstream mpdu merge */
	struct ar9170_rx_head rx_plcp;
	bool rx_has_plcp;
	struct sk_buff *rx_failover;
	int rx_failover_missing;

#ifdef CONFIG_CARL9170_WPC
	struct {
		bool pbc_state;
		struct input_dev *pbc;
		char name[32];
		char phys[32];
	} wps;
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_DEBUGFS
	struct carl9170_debug debug;
	struct dentry *debug_dir;
#endif /* CONFIG_CARL9170_DEBUGFS */

	/* PSM */
	struct work_struct ps_work;
	struct {
		unsigned int dtim_counter;
		unsigned long last_beacon;
		unsigned long last_action;
		unsigned long last_slept;
		unsigned int sleep_ms;
		unsigned int off_override;	/* PS_OFF_* bits */
		bool state;
	} ps;
};
433
/* Bits for ar->ps.off_override: reasons to keep power-save disabled. */
enum carl9170_ps_off_override_reasons {
	PS_OFF_VIF	= BIT(0),
	PS_OFF_BCN	= BIT(1),
	PS_OFF_5GHZ	= BIT(2),
};
439
/* Per-TID block-ack bookkeeping (see carl9170_sta_info::stats). */
struct carl9170_ba_stats {
	u8 ampdu_len;		/* NOTE(review): presumably frames per A-MPDU */
	u8 ampdu_ack_len;	/* NOTE(review): presumably acked subframes */
	bool clear;
};
445
/* Per-station driver private data (one agg session slot per TID). */
struct carl9170_sta_info {
	bool ht_sta;
	unsigned int ampdu_max_len;
	struct carl9170_sta_tid *agg[CARL9170_NUM_TID];
	struct carl9170_ba_stats stats[CARL9170_NUM_TID];
};
452
/*
 * Per-frame TX driver state.  ->ref is the skb's reference count,
 * manipulated via carl9170_tx_get_skb()/carl9170_tx_put_skb().
 */
struct carl9170_tx_info {
	unsigned long timeout;
	struct ar9170 *ar;
	struct kref ref;
};
458
459#define CHK_DEV_STATE(a, s) (((struct ar9170 *)a)->state >= (s))
460#define IS_INITIALIZED(a) (CHK_DEV_STATE(a, CARL9170_STOPPED))
461#define IS_ACCEPTING_CMD(a) (CHK_DEV_STATE(a, CARL9170_IDLE))
462#define IS_STARTED(a) (CHK_DEV_STATE(a, CARL9170_STARTED))
463
/*
 * Set the device state without taking state_lock; the locked wrappers
 * carl9170_set_state()/carl9170_set_state_when() below do that.
 */
static inline void __carl9170_set_state(struct ar9170 *ar,
	enum carl9170_device_state newstate)
{
	ar->state = newstate;
}
469
/* Set the device state under state_lock (irq-safe). */
static inline void carl9170_set_state(struct ar9170 *ar,
	enum carl9170_device_state newstate)
{
	unsigned long flags;

	spin_lock_irqsave(&ar->state_lock, flags);
	__carl9170_set_state(ar, newstate);
	spin_unlock_irqrestore(&ar->state_lock, flags);
}
479
/*
 * Atomically set the device state to @newstate, but only if the
 * current state is at least @min (compare-and-set under state_lock).
 */
static inline void carl9170_set_state_when(struct ar9170 *ar,
	enum carl9170_device_state min, enum carl9170_device_state newstate)
{
	unsigned long flags;

	spin_lock_irqsave(&ar->state_lock, flags);
	if (CHK_DEV_STATE(ar, min))
		__carl9170_set_state(ar, newstate);
	spin_unlock_irqrestore(&ar->state_lock, flags);
}
490
491/* exported interface */
492void *carl9170_alloc(size_t priv_size);
493int carl9170_register(struct ar9170 *ar);
494void carl9170_unregister(struct ar9170 *ar);
495void carl9170_free(struct ar9170 *ar);
496void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r);
497void carl9170_ps_check(struct ar9170 *ar);
498
499/* USB back-end */
500int carl9170_usb_open(struct ar9170 *ar);
501void carl9170_usb_stop(struct ar9170 *ar);
502void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb);
503void carl9170_usb_handle_tx_err(struct ar9170 *ar);
504int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids,
505 u32 plen, void *payload, u32 rlen, void *resp);
506int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
507 const bool free_buf);
508int carl9170_usb_restart(struct ar9170 *ar);
509void carl9170_usb_reset(struct ar9170 *ar);
510
511/* MAC */
512int carl9170_init_mac(struct ar9170 *ar);
513int carl9170_set_qos(struct ar9170 *ar);
514int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hast);
515int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
516 const u8 *mac);
517int carl9170_set_operating_mode(struct ar9170 *ar);
518int carl9170_set_beacon_timers(struct ar9170 *ar);
519int carl9170_set_dyn_sifs_ack(struct ar9170 *ar);
520int carl9170_set_rts_cts_rate(struct ar9170 *ar);
521int carl9170_set_ampdu_settings(struct ar9170 *ar);
522int carl9170_set_slot_time(struct ar9170 *ar);
523int carl9170_set_mac_rates(struct ar9170 *ar);
524int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
525int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
526int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
527 const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
528int carl9170_disable_key(struct ar9170 *ar, const u8 id);
529
530/* RX */
531void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len);
532void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len);
533
534/* TX */
535int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
536void carl9170_tx_janitor(struct work_struct *work);
537void carl9170_tx_process_status(struct ar9170 *ar,
538 const struct carl9170_rsp *cmd);
539void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
540 const bool success);
541void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb);
542void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
543void carl9170_tx_scheduler(struct ar9170 *ar);
544void carl9170_tx_get_skb(struct sk_buff *skb);
545int carl9170_tx_put_skb(struct sk_buff *skb);
546
547/* LEDs */
548#ifdef CONFIG_CARL9170_LEDS
549int carl9170_led_register(struct ar9170 *ar);
550void carl9170_led_unregister(struct ar9170 *ar);
551#endif /* CONFIG_CARL9170_LEDS */
552int carl9170_led_init(struct ar9170 *ar);
553int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state);
554
555/* PHY / RF */
556int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
557 enum nl80211_channel_type bw, enum carl9170_rf_init_mode rfi);
558int carl9170_get_noisefloor(struct ar9170 *ar);
559
560/* FW */
561int carl9170_parse_firmware(struct ar9170 *ar);
562int carl9170_fw_fix_eeprom(struct ar9170 *ar);
563
564extern struct ieee80211_rate __carl9170_ratetable[];
565extern int modparam_noht;
566
/*
 * Map a carl9170_vif (which is embedded in ar->vif_priv[id]) back to
 * its owning ar9170 via container_of.
 */
static inline struct ar9170 *carl9170_get_priv(struct carl9170_vif *carl_vif)
{
	return container_of(carl_vif, struct ar9170,
			    vif_priv[carl_vif->id]);
}
572
/*
 * Return the 802.11 header inside a TX skb: the skb data starts with
 * the _carl9170_tx_superframe wrapper, the header lives in its
 * frame_data.
 */
static inline struct ieee80211_hdr *carl9170_get_hdr(struct sk_buff *skb)
{
	return (void *)((struct _carl9170_tx_superframe *)
		skb->data)->frame_data;
}
578
579static inline u16 get_seq_h(struct ieee80211_hdr *hdr)
580{
581 return le16_to_cpu(hdr->seq_ctrl) >> 4;
582}
583
/* Sequence number of a TX superframe skb (see carl9170_get_hdr()). */
static inline u16 carl9170_get_seq(struct sk_buff *skb)
{
	return get_seq_h(carl9170_get_hdr(skb));
}
588
589static inline u16 get_tid_h(struct ieee80211_hdr *hdr)
590{
591 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
592}
593
/* TID of a TX superframe skb (see carl9170_get_hdr()). */
static inline u16 carl9170_get_tid(struct sk_buff *skb)
{
	return get_tid_h(carl9170_get_hdr(skb));
}
598
/*
 * Map a carl9170_vif_info (stored in ieee80211_vif::drv_priv) back
 * to its ieee80211_vif.
 */
static inline struct ieee80211_vif *
carl9170_get_vif(struct carl9170_vif_info *priv)
{
	return container_of((void *)priv, struct ieee80211_vif, drv_priv);
}
604
/*
 * Return the "main" interface: the first active entry in ar->vif_list,
 * or NULL if none is active.
 *
 * Protected by ar->mutex or RCU
 */
static inline struct ieee80211_vif *carl9170_get_main_vif(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;

	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		if (cvif->active)
			return carl9170_get_vif(cvif);
	}

	return NULL;
}
617
618static inline bool is_main_vif(struct ar9170 *ar, struct ieee80211_vif *vif)
619{
620 bool ret;
621
622 rcu_read_lock();
623 ret = (carl9170_get_main_vif(ar) == vif);
624 rcu_read_unlock();
625 return ret;
626}
627
628#endif /* __CARL9170_H */
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
new file mode 100644
index 000000000000..c21f3364bfec
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -0,0 +1,188 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include "carl9170.h"
40#include "cmd.h"
41
42int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
43{
44 __le32 buf[2] = {
45 cpu_to_le32(reg),
46 cpu_to_le32(val),
47 };
48 int err;
49
50 err = carl9170_exec_cmd(ar, CARL9170_CMD_WREG, sizeof(buf),
51 (u8 *) buf, 0, NULL);
52 if (err) {
53 if (net_ratelimit()) {
54 wiphy_err(ar->hw->wiphy, "writing reg %#x "
55 "(val %#x) failed (%d)\n", reg, val, err);
56 }
57 }
58 return err;
59}
60
/*
 * Read @nregs hardware registers (addresses in @regs) into @out.
 *
 * @out doubles as the command payload buffer: the register offsets
 * are staged in it (little endian), the firmware response overwrites
 * them in place and the results are then converted to CPU endian.
 * Both arrays must therefore hold @nregs u32 entries.
 */
int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
		       const u32 *regs, u32 *out)
{
	int i, err;
	__le32 *offs, *res;

	/* abuse "out" for the register offsets, must be same length */
	offs = (__le32 *)out;
	for (i = 0; i < nregs; i++)
		offs[i] = cpu_to_le32(regs[i]);

	/* also use the same buffer for the input */
	res = (__le32 *)out;

	err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
				4 * nregs, (u8 *)offs,
				4 * nregs, (u8 *)res);
	if (err) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "reading regs failed (%d)\n",
				  err);
		}
		return err;
	}

	/* convert result to cpu endian */
	for (i = 0; i < nregs; i++)
		out[i] = le32_to_cpu(res[i]);

	return 0;
}
92
/* Read a single hardware register (wrapper around carl9170_read_mreg). */
int carl9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
{
	return carl9170_read_mreg(ar, 1, &reg, val);
}
97
98int carl9170_echo_test(struct ar9170 *ar, const u32 v)
99{
100 u32 echores;
101 int err;
102
103 err = carl9170_exec_cmd(ar, CARL9170_CMD_ECHO,
104 4, (u8 *)&v,
105 4, (u8 *)&echores);
106 if (err)
107 return err;
108
109 if (v != echores) {
110 wiphy_info(ar->hw->wiphy, "wrong echo %x != %x", v, echores);
111 return -EINVAL;
112 }
113
114 return 0;
115}
116
117struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
118 const enum carl9170_cmd_oids cmd, const unsigned int len)
119{
120 struct carl9170_cmd *tmp;
121
122 tmp = kzalloc(sizeof(struct carl9170_cmd_head) + len, GFP_ATOMIC);
123 if (tmp) {
124 tmp->hdr.cmd = cmd;
125 tmp->hdr.len = len;
126 }
127
128 return tmp;
129}
130
131int carl9170_reboot(struct ar9170 *ar)
132{
133 struct carl9170_cmd *cmd;
134 int err;
135
136 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_REBOOT_ASYNC, 0);
137 if (!cmd)
138 return -ENOMEM;
139
140 err = __carl9170_exec_cmd(ar, (struct carl9170_cmd *)cmd, true);
141 return err;
142}
143
/* Issue a synchronous MAC software reset (SWRST) command. */
int carl9170_mac_reset(struct ar9170 *ar)
{
	return carl9170_exec_cmd(ar, CARL9170_CMD_SWRST,
				 0, NULL, 0, NULL);
}
149
/*
 * Send an asynchronous beacon-control command for interface @vif_id.
 * @mode selects the operation (e.g. CARL9170_BCN_CTRL_DRAIN), @addr
 * and @len describe the beacon buffer in device memory.  The command
 * buffer is freed by __carl9170_exec_cmd (free_buf=true).
 */
int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
		      const u32 mode, const u32 addr, const u32 len)
{
	struct carl9170_cmd *cmd;

	cmd = carl9170_cmd_buf(ar, CARL9170_CMD_BCN_CTRL_ASYNC,
			       sizeof(struct carl9170_bcn_ctrl_cmd));
	if (!cmd)
		return -ENOMEM;

	cmd->bcn_ctrl.vif_id = cpu_to_le32(vif_id);
	cmd->bcn_ctrl.mode = cpu_to_le32(mode);
	cmd->bcn_ctrl.bcn_addr = cpu_to_le32(addr);
	cmd->bcn_ctrl.bcn_len = cpu_to_le32(len);

	return __carl9170_exec_cmd(ar, cmd, true);
}
167
168int carl9170_powersave(struct ar9170 *ar, const bool ps)
169{
170 struct carl9170_cmd *cmd;
171 u32 state;
172
173 cmd = carl9170_cmd_buf(ar, CARL9170_CMD_PSM_ASYNC,
174 sizeof(struct carl9170_psm));
175 if (!cmd)
176 return -ENOMEM;
177
178 if (ps) {
179 /* Sleep until next TBTT */
180 state = CARL9170_PSM_SLEEP | 1;
181 } else {
182 /* wake up immediately */
183 state = 1;
184 }
185
186 cmd->psm.state = cpu_to_le32(state);
187 return __carl9170_exec_cmd(ar, cmd, true);
188}
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
new file mode 100644
index 000000000000..f78728c38294
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -0,0 +1,168 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * Basic HW register/memory/command access functions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39#ifndef __CMD_H
40#define __CMD_H
41
42#include "carl9170.h"
43
44/* basic HW access */
45int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int carl9170_read_reg(struct ar9170 *ar, const u32 reg, u32 *val);
47int carl9170_read_mreg(struct ar9170 *ar, const int nregs,
48 const u32 *regs, u32 *out);
49int carl9170_echo_test(struct ar9170 *ar, u32 v);
50int carl9170_reboot(struct ar9170 *ar);
51int carl9170_mac_reset(struct ar9170 *ar);
52int carl9170_powersave(struct ar9170 *ar, const bool power_on);
53int carl9170_bcn_ctrl(struct ar9170 *ar, const unsigned int vif_id,
54 const u32 mode, const u32 addr, const u32 len);
55
/*
 * Ask the firmware to drain interface @vif_id's beacon-related queue
 * (CARL9170_BCN_CTRL_DRAIN mode; addr/len are unused and passed as 0).
 */
static inline int carl9170_flush_cab(struct ar9170 *ar,
				     const unsigned int vif_id)
{
	return carl9170_bcn_ctrl(ar, vif_id, CARL9170_BCN_CTRL_DRAIN, 0, 0);
}
61
/*
 * Program the firmware RX filter.  The filter word is converted to
 * little endian before being handed to the synchronous RX_FILTER
 * command.
 */
static inline int carl9170_rx_filter(struct ar9170 *ar,
				     const unsigned int _rx_filter)
{
	__le32 rx_filter = cpu_to_le32(_rx_filter);

	return carl9170_exec_cmd(ar, CARL9170_CMD_RX_FILTER,
				 sizeof(rx_filter), (u8 *)&rx_filter,
				 0, NULL);
}
71
72struct carl9170_cmd *carl9170_cmd_buf(struct ar9170 *ar,
73 const enum carl9170_cmd_oids cmd, const unsigned int len);
74
75/*
76 * Macros to facilitate writing multiple registers in a single
77 * write-combining USB command. Note that when the first group
78 * fails the whole thing will fail without any others attempted,
79 * but you won't know which write in the group failed.
80 */
81#define carl9170_regwrite_begin(ar) \
82do { \
83 int __nreg = 0, __err = 0; \
84 struct ar9170 *__ar = ar;
85
86#define carl9170_regwrite(r, v) do { \
87 __ar->cmd_buf[2 * __nreg + 1] = cpu_to_le32(r); \
88 __ar->cmd_buf[2 * __nreg + 2] = cpu_to_le32(v); \
89 __nreg++; \
90 if ((__nreg >= PAYLOAD_MAX/2)) { \
91 if (IS_ACCEPTING_CMD(__ar)) \
92 __err = carl9170_exec_cmd(__ar, \
93 CARL9170_CMD_WREG, 8 * __nreg, \
94 (u8 *) &__ar->cmd_buf[1], 0, NULL); \
95 else \
96 goto __regwrite_out; \
97 \
98 __nreg = 0; \
99 if (__err) \
100 goto __regwrite_out; \
101 } \
102} while (0)
103
104#define carl9170_regwrite_finish() \
105__regwrite_out : \
106 if (__err == 0 && __nreg) { \
107 if (IS_ACCEPTING_CMD(__ar)) \
108 __err = carl9170_exec_cmd(__ar, \
109 CARL9170_CMD_WREG, 8 * __nreg, \
110 (u8 *) &__ar->cmd_buf[1], 0, NULL); \
111 __nreg = 0; \
112 }
113
114#define carl9170_regwrite_result() \
115 __err; \
116} while (0);
117
118
119#define carl9170_async_get_buf() \
120do { \
121 __cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
122 CARL9170_MAX_CMD_PAYLOAD_LEN); \
123 if (__cmd == NULL) { \
124 __err = -ENOMEM; \
125 goto __async_regwrite_out; \
126 } \
127} while (0);
128
129#define carl9170_async_regwrite_begin(carl) \
130do { \
131 int __nreg = 0, __err = 0; \
132 struct ar9170 *__carl = carl; \
133 struct carl9170_cmd *__cmd; \
134 carl9170_async_get_buf(); \
135
136#define carl9170_async_regwrite(r, v) do { \
137 __cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
138 __cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
139 __nreg++; \
140 if ((__nreg >= PAYLOAD_MAX/2)) { \
141 if (IS_ACCEPTING_CMD(__carl)) { \
142 __cmd->hdr.len = 8 * __nreg; \
143 __err = __carl9170_exec_cmd(__carl, __cmd, true);\
144 __cmd = NULL; \
145 carl9170_async_get_buf(); \
146 } else { \
147 goto __async_regwrite_out; \
148 } \
149 __nreg = 0; \
150 if (__err) \
151 goto __async_regwrite_out; \
152 } \
153} while (0)
154
155#define carl9170_async_regwrite_finish() \
156__async_regwrite_out : \
157 if (__err == 0 && __nreg) { \
158 __cmd->hdr.len = 8 * __nreg; \
159 if (IS_ACCEPTING_CMD(__carl)) \
160 __err = __carl9170_exec_cmd(__carl, __cmd, true);\
161 __nreg = 0; \
162 }
163
164#define carl9170_async_regwrite_result() \
165 __err; \
166} while (0);
167
168#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
new file mode 100644
index 000000000000..0ac1124c2a0b
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -0,0 +1,902 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * debug(fs) probing
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2008-2009 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/seq_file.h>
44#include <linux/vmalloc.h>
45#include "carl9170.h"
46#include "cmd.h"
47
48#define ADD(buf, off, max, fmt, args...) \
49 off += snprintf(&buf[off], max - off, fmt, ##args);
50
/*
 * Common debugfs open: stash the ar9170 pointer (inode->i_private)
 * in file->private_data for the read/write dispatchers below.
 */
static int carl9170_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
56
/*
 * Descriptor for one debugfs file; recovered from file->f_op via
 * container_of() in the read/write dispatchers.
 */
struct carl9170_debugfs_fops {
	/* scratch buffer size vmalloc'd for ->read (0 = none) */
	unsigned int read_bufsize;
	mode_t attr;		/* debugfs file permissions */
	/* fills (or replaces) buf, sets *len, returns buffer to copy out */
	char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
		      ssize_t *len);
	ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
	const struct file_operations fops;

	/* minimum device state required, checked with CHK_DEV_STATE() */
	enum carl9170_device_state req_dev_state;
};
67
/*
 * Generic debugfs read dispatcher.
 *
 * Optionally vmalloc()s a read_bufsize scratch buffer, calls the
 * per-file ->read() handler under ar->mutex and copies the result to
 * userspace.  The handler returns the buffer to copy from — which is
 * also the buffer that gets vfree()d, so a handler may substitute its
 * own allocation only when read_bufsize is 0 (enforced by the
 * WARN_ON_ONCE below).
 */
static ssize_t carl9170_debugfs_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *ppos)
{
	struct carl9170_debugfs_fops *dfops;
	struct ar9170 *ar;
	char *buf = NULL, *res_buf = NULL;
	ssize_t ret = 0;
	int err = 0;	/* NOTE(review): int, though we return ssize_t */

	if (!count)
		return 0;

	ar = file->private_data;

	if (!ar)
		return -ENODEV;
	dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);

	if (!dfops->read)
		return -ENOSYS;

	if (dfops->read_bufsize) {
		buf = vmalloc(dfops->read_bufsize);
		if (!buf)
			return -ENOMEM;
	}

	mutex_lock(&ar->mutex);
	if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
		err = -ENODEV;
		res_buf = buf;	/* free the scratch buffer on the way out */
		goto out_free;
	}

	res_buf = dfops->read(ar, buf, dfops->read_bufsize, &ret);

	if (ret > 0)
		err = simple_read_from_buffer(userbuf, count, ppos,
					      res_buf, ret);
	else
		err = ret;

	WARN_ON_ONCE(dfops->read_bufsize && (res_buf != buf));

out_free:
	vfree(res_buf);
	mutex_unlock(&ar->mutex);
	return err;
}
117
118static ssize_t carl9170_debugfs_write(struct file *file,
119 const char __user *userbuf, size_t count, loff_t *ppos)
120{
121 struct carl9170_debugfs_fops *dfops;
122 struct ar9170 *ar;
123 char *buf = NULL;
124 int err = 0;
125
126 if (!count)
127 return 0;
128
129 if (count > PAGE_SIZE)
130 return -E2BIG;
131
132 ar = file->private_data;
133
134 if (!ar)
135 return -ENODEV;
136 dfops = container_of(file->f_op, struct carl9170_debugfs_fops, fops);
137
138 if (!dfops->write)
139 return -ENOSYS;
140
141 buf = vmalloc(count);
142 if (!buf)
143 return -ENOMEM;
144
145 if (copy_from_user(buf, userbuf, count)) {
146 err = -EFAULT;
147 goto out_free;
148 }
149
150 if (mutex_trylock(&ar->mutex) == 0) {
151 err = -EAGAIN;
152 goto out_free;
153 }
154
155 if (!CHK_DEV_STATE(ar, dfops->req_dev_state)) {
156 err = -ENODEV;
157 goto out_unlock;
158 }
159
160 err = dfops->write(ar, buf, count);
161 if (err)
162 goto out_unlock;
163
164out_unlock:
165 mutex_unlock(&ar->mutex);
166
167out_free:
168 vfree(buf);
169 return err;
170}
171
172#define __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
173 _attr, _dstate) \
174static const struct carl9170_debugfs_fops carl_debugfs_##name ##_ops = {\
175 .read_bufsize = _read_bufsize, \
176 .read = _read, \
177 .write = _write, \
178 .attr = _attr, \
179 .req_dev_state = _dstate, \
180 .fops = { \
181 .open = carl9170_debugfs_open, \
182 .read = carl9170_debugfs_read, \
183 .write = carl9170_debugfs_write, \
184 .owner = THIS_MODULE \
185 }, \
186}
187
188#define DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, _attr) \
189 __DEBUGFS_DECLARE_FILE(name, _read, _write, _read_bufsize, \
190 _attr, CARL9170_STARTED) \
191
192#define DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize) \
193 DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
194 NULL, _read_bufsize, S_IRUSR)
195
196#define DEBUGFS_DECLARE_WO_FILE(name) \
197 DEBUGFS_DECLARE_FILE(name, NULL, carl9170_debugfs_##name ##_write,\
198 0, S_IWUSR)
199
200#define DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize) \
201 DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
202 carl9170_debugfs_##name ##_write, \
203 _read_bufsize, S_IRUSR | S_IWUSR)
204
205#define __DEBUGFS_DECLARE_RW_FILE(name, _read_bufsize, _dstate) \
206 __DEBUGFS_DECLARE_FILE(name, carl9170_debugfs_##name ##_read, \
207 carl9170_debugfs_##name ##_write, \
208 _read_bufsize, S_IRUSR | S_IWUSR, _dstate)
209
210#define DEBUGFS_READONLY_FILE(name, _read_bufsize, fmt, value...) \
211static char *carl9170_debugfs_ ##name ## _read(struct ar9170 *ar, \
212 char *buf, size_t buf_size,\
213 ssize_t *len) \
214{ \
215 ADD(buf, *len, buf_size, fmt "\n", ##value); \
216 return buf; \
217} \
218DEBUGFS_DECLARE_RO_FILE(name, _read_bufsize)
219
/*
 * debugfs "mem_usage": dump the firmware memory-block allocator —
 * the allocation bitmap, cookie usage and free-block counts.
 * mem_bitmap/mem_blocks are sampled under ar->mem_lock.
 */
static char *carl9170_debugfs_mem_usage_read(struct ar9170 *ar, char *buf,
					     size_t bufsize, ssize_t *len)
{
	ADD(buf, *len, bufsize, "jar: [");

	spin_lock_bh(&ar->mem_lock);

	*len += bitmap_scnprintf(&buf[*len], bufsize - *len,
				 ar->mem_bitmap, ar->fw.mem_blocks);

	ADD(buf, *len, bufsize, "]\n");

	ADD(buf, *len, bufsize, "cookies: used:%3d / total:%3d, allocs:%d\n",
	    bitmap_weight(ar->mem_bitmap, ar->fw.mem_blocks),
	    ar->fw.mem_blocks, atomic_read(&ar->mem_allocs));

	ADD(buf, *len, bufsize, "memory: free:%3d (%3d KiB) / total:%3d KiB)\n",
	    atomic_read(&ar->mem_free_blocks),
	    (atomic_read(&ar->mem_free_blocks) * ar->fw.mem_block_size) / 1024,
	    (ar->fw.mem_blocks * ar->fw.mem_block_size) / 1024);

	spin_unlock_bh(&ar->mem_lock);

	return buf;
}
245DEBUGFS_DECLARE_RO_FILE(mem_usage, 512);
246
/*
 * Per-AC (VO/VI/BE/BK) queue statistics: hardware queue fill levels
 * and limits, total frame counts, and the driver-side pending/waittx
 * queue lengths.  Only the tx_stats[] snapshot is taken under
 * ar->tx_stats_lock; the skb queue lengths below are read lock-free
 * via skb_queue_len().
 */
static char *carl9170_debugfs_qos_stat_read(struct ar9170 *ar, char *buf,
					    size_t bufsize, ssize_t *len)
{
	ADD(buf, *len, bufsize, "%s QoS AC\n", modparam_noht ? "Hardware" :
	    "Software");

	ADD(buf, *len, bufsize, "[     VO            VI       "
				 "     BE            BK      ]\n");

	spin_lock_bh(&ar->tx_stats_lock);
	ADD(buf, *len, bufsize, "[length/limit  length/limit  "
				 "length/limit  length/limit ]\n"
				"[   %3d/%3d       %3d/%3d    "
				 "   %3d/%3d       %3d/%3d   ]\n\n",
	    ar->tx_stats[0].len, ar->tx_stats[0].limit,
	    ar->tx_stats[1].len, ar->tx_stats[1].limit,
	    ar->tx_stats[2].len, ar->tx_stats[2].limit,
	    ar->tx_stats[3].len, ar->tx_stats[3].limit);

	ADD(buf, *len, bufsize, "[    total         total     "
				 "    total         total    ]\n"
				"[%10d    %10d    %10d    %10d   ]\n\n",
	    ar->tx_stats[0].count, ar->tx_stats[1].count,
	    ar->tx_stats[2].count, ar->tx_stats[3].count);

	spin_unlock_bh(&ar->tx_stats_lock);

	ADD(buf, *len, bufsize, "[  pend/waittx   pend/waittx "
				 "  pend/waittx   pend/waittx]\n"
				"[   %3d/%3d       %3d/%3d    "
				 "   %3d/%3d       %3d/%3d   ]\n\n",
	    skb_queue_len(&ar->tx_pending[0]),
	    skb_queue_len(&ar->tx_status[0]),
	    skb_queue_len(&ar->tx_pending[1]),
	    skb_queue_len(&ar->tx_status[1]),
	    skb_queue_len(&ar->tx_pending[2]),
	    skb_queue_len(&ar->tx_status[2]),
	    skb_queue_len(&ar->tx_pending[3]),
	    skb_queue_len(&ar->tx_status[3]));

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(qos_stat, 512);
290
/*
 * Append a one-line human readable summary of a queued tx frame
 * (cookie, DA, sequence number, mac/phy control words and the frame's
 * age) to @buf at offset *@off.  @skb must carry a
 * _carl9170_tx_superframe header, as built by the tx path.
 */
static void carl9170_debugfs_format_frame(struct ar9170 *ar,
	struct sk_buff *skb, const char *prefix, char *buf,
	ssize_t *off, ssize_t bufsize)
{
	struct _carl9170_tx_superframe *txc = (void *) skb->data;
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
	struct carl9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
	struct ieee80211_hdr *hdr = (void *) txc->frame_data;

	ADD(buf, *off, bufsize, "%s %p, c:%2x, DA:%pM, sq:%4d, mc:%.4x, "
	    "pc:%.8x, to:%d ms\n", prefix, skb, txc->s.cookie,
	    ieee80211_get_DA(hdr), get_seq_h(hdr),
	    le16_to_cpu(txc->f.mac_control), le32_to_cpu(txc->f.phy_control),
	    jiffies_to_msecs(jiffies - arinfo->timeout));
}
306
307
/*
 * Dump the state of every active tx A-MPDU session: sequence number
 * bookkeeping (BSN/SNX/HSN), the block-ack window bitmap with marker
 * lines pointing at the base/next/last sequence positions, and the
 * frames still waiting in the pre-aggregation reorder queue.
 *
 * Iterates ar->tx_ampdu_list under rcu_read_lock(); each session is
 * additionally snapshotted under its own iter->lock.
 */
static char *carl9170_debugfs_ampdu_state_read(struct ar9170 *ar, char *buf,
					       size_t bufsize, ssize_t *len)
{
	struct carl9170_sta_tid *iter;
	struct sk_buff *skb;
	int cnt = 0, fc;
	int offset;

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {

		spin_lock_bh(&iter->lock);
		ADD(buf, *len, bufsize, "Entry: #%2d TID:%1d, BSN:%4d, "
		    "SNX:%4d, HSN:%4d, BAW:%2d, state:%1d, toggles:%d\n",
		    cnt, iter->tid, iter->bsn, iter->snx, iter->hsn,
		    iter->max, iter->state, iter->counter);

		ADD(buf, *len, bufsize, "\tWindow:  [");

		*len += bitmap_scnprintf(&buf[*len], bufsize - *len,
					 iter->bitmap, CARL9170_BAW_BITS);

/*
 * Translates a bit index into its printed column in the bitmap above;
 * accounts for bitmap_scnprintf()'s hex digits (one per 4 bits) and
 * the ',' separators it inserts every 32 bits.
 */
#define BM_STR_OFF(offset)					\
	((CARL9170_BAW_BITS - (offset) - 1) / 4 +		\
	 (CARL9170_BAW_BITS - (offset) - 1) / 32 + 1)

		ADD(buf, *len, bufsize, ",W]\n");

		offset = BM_STR_OFF(0);
		ADD(buf, *len, bufsize, "\tBase Seq: %*s\n", offset, "T");

		offset = BM_STR_OFF(SEQ_DIFF(iter->snx, iter->bsn));
		ADD(buf, *len, bufsize, "\tNext Seq: %*s\n", offset, "W");

		offset = BM_STR_OFF(((int)iter->hsn - (int)iter->bsn) %
				    CARL9170_BAW_BITS);
		ADD(buf, *len, bufsize, "\tLast Seq: %*s\n", offset, "N");

		ADD(buf, *len, bufsize, "\tPre-Aggregation reorder buffer: "
		    " currently queued:%d\n", skb_queue_len(&iter->queue));

		fc = 0;
		skb_queue_walk(&iter->queue, skb) {
			char prefix[32];

			snprintf(prefix, sizeof(prefix), "\t\t%3d :", fc);
			carl9170_debugfs_format_frame(ar, skb, prefix, buf,
						      len, bufsize);

			fc++;
		}
		spin_unlock_bh(&iter->lock);
		cnt++;
	}
	rcu_read_unlock();

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(ampdu_state, 8000);
367
/*
 * Print one summary line (via carl9170_debugfs_format_frame) for every
 * frame currently in @queue.  The queue's own lock is held for the
 * whole walk, so frames can neither leave nor join mid-dump.
 */
static void carl9170_debugfs_queue_dump(struct ar9170 *ar, char *buf,
	ssize_t *len, size_t bufsize, struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	char prefix[16];
	int fc = 0;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		snprintf(prefix, sizeof(prefix), "%3d :", fc);
		carl9170_debugfs_format_frame(ar, skb, prefix, buf,
					      len, bufsize);
		fc++;
	}
	spin_unlock_bh(&queue->lock);
}
384
/*
 * Declares a read-only debugfs file "<q>_<qi>" that dumps the contents
 * of the skb queue ar-><q>[<qi>] (e.g. tx_status[0], tx_pending[3]).
 */
#define DEBUGFS_QUEUE_DUMP(q, qi)					\
static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar,	\
	char *buf, size_t bufsize, ssize_t *len)			\
{									\
	carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]);	\
	return buf;							\
}									\
DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000);
393
/*
 * Report the station power-save state machine: current mode
 * (forced-CAM override > PSM > CAM), configured sleep duration and the
 * age of the last two state transitions.
 */
static char *carl9170_debugfs_sta_psm_read(struct ar9170 *ar, char *buf,
					   size_t bufsize, ssize_t *len)
{
	ADD(buf, *len, bufsize, "psm state: %s\n", (ar->ps.off_override ?
	    "FORCE CAM" : (ar->ps.state ? "PSM" : "CAM")));

	ADD(buf, *len, bufsize, "sleep duration: %d ms.\n", ar->ps.sleep_ms);
	ADD(buf, *len, bufsize, "last power-state transition: %d ms ago.\n",
	    jiffies_to_msecs(jiffies - ar->ps.last_action));
	ADD(buf, *len, bufsize, "last CAM->PSM transition: %d ms ago.\n",
	    jiffies_to_msecs(jiffies - ar->ps.last_slept));

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(sta_psm, 160);
409
/*
 * For each mac80211 tx queue, report how long it has currently been
 * stopped (0 if running) and the longest stop period observed so far.
 *
 * Note: this read has a side effect — it clears the per-queue
 * max_queue_stop_timeout[] high-water marks, so each read reports the
 * maximum since the previous read.
 */
static char *carl9170_debugfs_tx_stuck_read(struct ar9170 *ar, char *buf,
					    size_t bufsize, ssize_t *len)
{
	int i;

	for (i = 0; i < ar->hw->queues; i++) {
		ADD(buf, *len, bufsize, "TX queue [%d]: %10d max:%10d ms.\n",
		    i, ieee80211_queue_stopped(ar->hw, i) ?
		    jiffies_to_msecs(jiffies - ar->queue_stop_timeout[i]) : 0,
		    jiffies_to_msecs(ar->max_queue_stop_timeout[i]));

		ar->max_queue_stop_timeout[i] = 0;
	}

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(tx_stuck, 180);
427
428static char *carl9170_debugfs_phy_noise_read(struct ar9170 *ar, char *buf,
429 size_t bufsize, ssize_t *len)
430{
431 int err;
432
433 err = carl9170_get_noisefloor(ar);
434 if (err) {
435 *len = err;
436 return buf;
437 }
438
439 ADD(buf, *len, bufsize, "Chain 0: %10d dBm, ext. chan.:%10d dBm\n",
440 ar->noise[0], ar->noise[2]);
441 ADD(buf, *len, bufsize, "Chain 2: %10d dBm, ext. chan.:%10d dBm\n",
442 ar->noise[1], ar->noise[3]);
443
444 return buf;
445}
446DEBUGFS_DECLARE_RO_FILE(phy_noise, 180);
447
/*
 * List all registered virtual interfaces: the slot usage bitmap and,
 * for each VIF, whether it is the master, its id, interface type, MAC
 * address and beaconing state.  The VIF list is walked under
 * rcu_read_lock().
 */
static char *carl9170_debugfs_vif_dump_read(struct ar9170 *ar, char *buf,
					    size_t bufsize, ssize_t *len)
{
	struct carl9170_vif_info *iter;
	int i = 0;

	ADD(buf, *len, bufsize, "registered VIFs:%d \\ %d\n",
	    ar->vifs, ar->fw.vif_num);

	ADD(buf, *len, bufsize, "VIF bitmap: [");

	*len += bitmap_scnprintf(&buf[*len], bufsize - *len,
				 &ar->vif_bitmap, ar->fw.vif_num);

	ADD(buf, *len, bufsize, "]\n");

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->vif_list, list) {
		struct ieee80211_vif *vif = carl9170_get_vif(iter);
		ADD(buf, *len, bufsize, "\t%d = [%s VIF, id:%d, type:%x "
		    " mac:%pM %s]\n", i, (carl9170_get_main_vif(ar) == vif ?
		    "Master" : " Slave"), iter->id, vif->type, vif->addr,
		    iter->enable_beacon ? "beaconing " : "");
		i++;
	}
	rcu_read_unlock();

	return buf;
}
DEBUGFS_DECLARE_RO_FILE(vif_dump, 8000);
478
/*
 * Fetch a fresh snapshot of the <name>_regs hardware counters into
 * ar->debug.stats.<name>_counter[] via a single multi-register read.
 * The counters are zeroed first so a failed/short read doesn't leave
 * stale values behind.  "Returns" 0 on success, -ENODEV if the device
 * isn't started, or the carl9170_read_mreg() error code.
 */
#define UPDATE_COUNTER(ar, name)	({ \
	u32 __tmp[ARRAY_SIZE(name##_regs)]; \
	unsigned int __i, __err = -ENODEV; \
 \
	for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
		__tmp[__i] = name##_regs[__i].reg; \
		ar->debug.stats.name##_counter[__i] = 0; \
	} \
 \
	if (IS_STARTED(ar)) \
		__err = carl9170_read_mreg(ar, ARRAY_SIZE(name##_regs), \
					   __tmp, ar->debug.stats.name##_counter); \
	(__err); })

/*
 * Accumulate the most recent counter snapshot into the running
 * <name>_sum[] totals (the hardware counters reset on read).
 */
#define TALLY_SUM_UP(ar, name)	do { \
	unsigned int __i; \
 \
	for (__i = 0; __i < ARRAY_SIZE(name##_regs); __i++) { \
		ar->debug.stats.name##_sum[__i] += \
			ar->debug.stats.name##_counter[__i]; \
	} \
} while (0)
501
/*
 * Declares a read-only debugfs file that prints "<regname> = sum[+delta]"
 * for every register in <name>_regs, keeping a running tally across
 * reads.  The handler ignores the preallocated buffer (read_bufsize is
 * 0) and vmalloc()s its own, sized 80 bytes per register.
 *
 * NOTE(review): the vmalloc'd buffer is returned to the generic
 * carl9170_debugfs_read() wrapper (not visible in this file) — it is
 * presumably freed there; confirm before changing ownership.
 */
#define DEBUGFS_HW_TALLY_FILE(name, f) \
static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
	 char *dum, size_t bufsize, ssize_t *ret) \
{ \
	char *buf; \
	int i, max_len, err; \
 \
	max_len = ARRAY_SIZE(name##_regs) * 80; \
	buf = vmalloc(max_len); \
	if (!buf) \
		return NULL; \
 \
	err = UPDATE_COUNTER(ar, name); \
	if (err) { \
		*ret = err; \
		return buf; \
	} \
 \
	TALLY_SUM_UP(ar, name); \
 \
	for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
		ADD(buf, *ret, max_len, "%22s = %" f "[+%" f "]\n", \
		    name##_regs[i].nreg, ar->debug.stats.name ##_sum[i],\
		    ar->debug.stats.name ##_counter[i]); \
	} \
 \
	return buf; \
} \
DEBUGFS_DECLARE_RO_FILE(name, 0);

/*
 * Same as DEBUGFS_HW_TALLY_FILE, but prints only the current register
 * snapshot ("<regname> = value") without accumulating totals — used
 * for state registers rather than event counters.
 */
#define DEBUGFS_HW_REG_FILE(name, f) \
static char *carl9170_debugfs_##name ## _read(struct ar9170 *ar, \
	char *dum, size_t bufsize, ssize_t *ret) \
{ \
	char *buf; \
	int i, max_len, err; \
 \
	max_len = ARRAY_SIZE(name##_regs) * 80; \
	buf = vmalloc(max_len); \
	if (!buf) \
		return NULL; \
 \
	err = UPDATE_COUNTER(ar, name); \
	if (err) { \
		*ret = err; \
		return buf; \
	} \
 \
	for (i = 0; i < ARRAY_SIZE(name##_regs); i++) { \
		ADD(buf, *ret, max_len, "%22s = %" f "\n", \
		    name##_regs[i].nreg, \
		    ar->debug.stats.name##_counter[i]); \
	} \
 \
	return buf; \
} \
DEBUGFS_DECLARE_RO_FILE(name, 0);
559
/*
 * debugfs "hw_ioread32" write handler.
 *
 * Parses "0xADDR [count]" and reads up to 15 consecutive 32-bit
 * registers starting at the word-aligned address ADDR (which must lie
 * below the 0x280000 end of the register window).  Each result is
 * appended to the debug ring buffer, to be fetched by a subsequent
 * read of this file.
 *
 * Returns @count on success or a negative error code.
 */
static ssize_t carl9170_debugfs_hw_ioread32_write(struct ar9170 *ar,
	const char *buf, size_t count)
{
	int err = 0, i, n = 0, max_len = 32, res;
	unsigned int reg, tmp;

	if (!count)
		return 0;

	if (count > max_len)
		return -E2BIG;

	res = sscanf(buf, "0x%X %d", &reg, &n);
	if (res < 1) {
		err = -EINVAL;
		goto out;
	}

	/* the register count is optional and defaults to a single read */
	if (res == 1)
		n = 1;

	if (n > 15) {
		err = -EMSGSIZE;
		goto out;
	}

	/*
	 * checking reg first also guards the second test against
	 * wrap-around of reg + (n << 2)
	 */
	if ((reg >= 0x280000) || ((reg + (n << 2)) >= 0x280000)) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	/* registers are 32 bit wide, the address must be word aligned */
	if (reg & 3) {
		err = -EINVAL;
		goto out;
	}

	for (i = 0; i < n; i++) {
		err = carl9170_read_reg(ar, reg + (i << 2), &tmp);
		if (err)
			goto out;

		/* stash address/value pairs for hw_ioread32's read side */
		ar->debug.ring[ar->debug.ring_tail].reg = reg + (i << 2);
		ar->debug.ring[ar->debug.ring_tail].value = tmp;
		ar->debug.ring_tail++;
		ar->debug.ring_tail %= CARL9170_DEBUG_RING_SIZE;
	}

out:
	return err ? err : count;
}
610
/*
 * debugfs "hw_ioread32" read handler: drain the debug ring buffer
 * (filled by the write handler above) as "address = value" lines.
 *
 * At most 65 entries are printed per call; afterwards the ring is
 * unconditionally emptied (head is snapped to tail), so any entries
 * beyond the print cap are discarded rather than kept for the next
 * read.
 */
static char *carl9170_debugfs_hw_ioread32_read(struct ar9170 *ar, char *buf,
					       size_t bufsize, ssize_t *ret)
{
	int i = 0;

	while (ar->debug.ring_head != ar->debug.ring_tail) {
		ADD(buf, *ret, bufsize, "%.8x = %.8x\n",
		    ar->debug.ring[ar->debug.ring_head].reg,
		    ar->debug.ring[ar->debug.ring_head].value);

		ar->debug.ring_head++;
		ar->debug.ring_head %= CARL9170_DEBUG_RING_SIZE;

		if (i++ == 64)
			break;
	}
	ar->debug.ring_head = ar->debug.ring_tail;
	return buf;
}
DEBUGFS_DECLARE_RW_FILE(hw_ioread32, CARL9170_DEBUG_RING_SIZE * 40);
631
632static ssize_t carl9170_debugfs_bug_write(struct ar9170 *ar, const char *buf,
633 size_t count)
634{
635 int err;
636
637 if (count < 1)
638 return -EINVAL;
639
640 switch (buf[0]) {
641 case 'F':
642 ar->needs_full_reset = true;
643 break;
644
645 case 'R':
646 if (!IS_STARTED(ar)) {
647 err = -EAGAIN;
648 goto out;
649 }
650
651 ar->needs_full_reset = false;
652 break;
653
654 case 'M':
655 err = carl9170_mac_reset(ar);
656 if (err < 0)
657 count = err;
658
659 goto out;
660
661 case 'P':
662 err = carl9170_set_channel(ar, ar->hw->conf.channel,
663 ar->hw->conf.channel_type, CARL9170_RFI_COLD);
664 if (err < 0)
665 count = err;
666
667 goto out;
668
669 default:
670 return -EINVAL;
671 }
672
673 carl9170_restart(ar, CARL9170_RR_USER_REQUEST);
674
675out:
676 return count;
677}
678
/*
 * debugfs "bug" read handler: print the available recovery commands
 * and the accumulated restart/error statistics.
 */
static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
				       size_t bufsize, ssize_t *ret)
{
	ADD(buf, *ret, bufsize, "[P]hy reinit, [R]estart, [F]ull usb reset, "
	    "[M]ac reset\n");
	ADD(buf, *ret, bufsize, "firmware restarts:%d, last reason:%d\n",
	    ar->restart_counter, ar->last_reason);
	ADD(buf, *ret, bufsize, "phy reinit errors:%d (%d)\n",
	    ar->total_chan_fail, ar->chan_fail);
	ADD(buf, *ret, bufsize, "reported firmware errors:%d\n",
	    ar->fw.err_counter);
	ADD(buf, *ret, bufsize, "reported firmware BUGs:%d\n",
	    ar->fw.bug_counter);
	ADD(buf, *ret, bufsize, "pending restart requests:%d\n",
	    atomic_read(&ar->pending_restarts));
	return buf;
}
/* writable even while stopped, so recovery can be forced at any time */
__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
697
/* human readable names for the CARL9170_ERP_* protection modes */
static const char *erp_modes[] = {
	[CARL9170_ERP_INVALID] = "INVALID",
	[CARL9170_ERP_AUTO] = "Automatic",
	[CARL9170_ERP_MAC80211] = "Set by MAC80211",
	[CARL9170_ERP_OFF] = "Force Off",
	[CARL9170_ERP_RTS] = "Force RTS",
	[CARL9170_ERP_CTS] = "Force CTS"
};

/* debugfs "erp" read handler: show the active ERP protection mode */
static char *carl9170_debugfs_erp_read(struct ar9170 *ar, char *buf,
				       size_t bufsize, ssize_t *ret)
{
	ADD(buf, *ret, bufsize, "ERP Setting: (%d) -> %s\n", ar->erp_mode,
	    erp_modes[ar->erp_mode]);
	return buf;
}
714
715static ssize_t carl9170_debugfs_erp_write(struct ar9170 *ar, const char *buf,
716 size_t count)
717{
718 int res, val;
719
720 if (count < 1)
721 return -EINVAL;
722
723 res = sscanf(buf, "%d", &val);
724 if (res != 1)
725 return -EINVAL;
726
727 if (!((val > CARL9170_ERP_INVALID) &&
728 (val < __CARL9170_ERP_NUM)))
729 return -EINVAL;
730
731 ar->erp_mode = val;
732 return count;
733}
734
735DEBUGFS_DECLARE_RW_FILE(erp, 80);
736
737static ssize_t carl9170_debugfs_hw_iowrite32_write(struct ar9170 *ar,
738 const char *buf, size_t count)
739{
740 int err = 0, max_len = 22, res;
741 u32 reg, val;
742
743 if (!count)
744 return 0;
745
746 if (count > max_len)
747 return -E2BIG;
748
749 res = sscanf(buf, "0x%X 0x%X", &reg, &val);
750 if (res != 2) {
751 err = -EINVAL;
752 goto out;
753 }
754
755 if (reg <= 0x100000 || reg >= 0x280000) {
756 err = -EADDRNOTAVAIL;
757 goto out;
758 }
759
760 if (reg & 3) {
761 err = -EINVAL;
762 goto out;
763 }
764
765 err = carl9170_write_reg(ar, reg, val);
766 if (err)
767 goto out;
768
769out:
770 return err ? err : count;
771}
772DEBUGFS_DECLARE_WO_FILE(hw_iowrite32);
773
/* hardware counter tallies (decimal) and raw register dumps (hex) */
DEBUGFS_HW_TALLY_FILE(hw_tx_tally, "u");
DEBUGFS_HW_TALLY_FILE(hw_rx_tally, "u");
DEBUGFS_HW_TALLY_FILE(hw_phy_errors, "u");
DEBUGFS_HW_REG_FILE(hw_wlan_queue, ".8x");
DEBUGFS_HW_REG_FILE(hw_pta_queue, ".8x");
DEBUGFS_HW_REG_FILE(hw_ampdu_info, ".8x");

/* per-AC dumps of the status and pending frame queues */
DEBUGFS_QUEUE_DUMP(tx_status, 0);
DEBUGFS_QUEUE_DUMP(tx_status, 1);
DEBUGFS_QUEUE_DUMP(tx_status, 2);
DEBUGFS_QUEUE_DUMP(tx_status, 3);
DEBUGFS_QUEUE_DUMP(tx_pending, 0);
DEBUGFS_QUEUE_DUMP(tx_pending, 1);
DEBUGFS_QUEUE_DUMP(tx_pending, 2);
DEBUGFS_QUEUE_DUMP(tx_pending, 3);

/* USB urb accounting */
DEBUGFS_READONLY_FILE(usb_tx_anch_urbs, 20, "%d",
		      atomic_read(&ar->tx_anch_urbs));
DEBUGFS_READONLY_FILE(usb_rx_anch_urbs, 20, "%d",
		      atomic_read(&ar->rx_anch_urbs));
DEBUGFS_READONLY_FILE(usb_rx_work_urbs, 20, "%d",
		      atomic_read(&ar->rx_work_urbs));
DEBUGFS_READONLY_FILE(usb_rx_pool_urbs, 20, "%d",
		      atomic_read(&ar->rx_pool_urbs));

/* tx path counters */
DEBUGFS_READONLY_FILE(tx_total_queued, 20, "%d",
		      atomic_read(&ar->tx_total_queued));
DEBUGFS_READONLY_FILE(tx_ampdu_scheduler, 20, "%d",
		      atomic_read(&ar->tx_ampdu_scheduler));

DEBUGFS_READONLY_FILE(tx_total_pending, 20, "%d",
		      atomic_read(&ar->tx_total_pending));

DEBUGFS_READONLY_FILE(tx_ampdu_list_len, 20, "%d",
		      ar->tx_ampdu_list_len);

DEBUGFS_READONLY_FILE(tx_ampdu_upload, 20, "%d",
		      atomic_read(&ar->tx_ampdu_upload));

DEBUGFS_READONLY_FILE(tx_janitor_last_run, 64, "last run:%d ms ago",
	jiffies_to_msecs(jiffies - ar->tx_janitor_last_run));

DEBUGFS_READONLY_FILE(tx_dropped, 20, "%d", ar->tx_dropped);

DEBUGFS_READONLY_FILE(rx_dropped, 20, "%d", ar->rx_dropped);

/* misc rx / aggregation / beacon state */
DEBUGFS_READONLY_FILE(sniffer_enabled, 20, "%d", ar->sniffer_enabled);
DEBUGFS_READONLY_FILE(rx_software_decryption, 20, "%d",
		      ar->rx_software_decryption);
DEBUGFS_READONLY_FILE(ampdu_factor, 20, "%d",
		      ar->current_factor);
DEBUGFS_READONLY_FILE(ampdu_density, 20, "%d",
		      ar->current_density);

DEBUGFS_READONLY_FILE(beacon_int, 20, "%d TU", ar->global_beacon_int);
DEBUGFS_READONLY_FILE(pretbtt, 20, "%d TU", ar->global_pretbtt);
828
/*
 * Create the driver's debugfs directory (under the wiphy's debugfs
 * root) and populate it with all files declared above.  Called once
 * per device at registration time; torn down again by
 * carl9170_debugfs_unregister().
 */
void carl9170_debugfs_register(struct ar9170 *ar)
{
	ar->debug_dir = debugfs_create_dir(KBUILD_MODNAME,
		ar->hw->wiphy->debugfsdir);

/* create one file from its generated carl_debugfs_<name>_ops struct */
#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr,	\
			    ar->debug_dir, ar,				\
			    &carl_debugfs_##name ## _ops.fops);

	DEBUGFS_ADD(usb_tx_anch_urbs);
	DEBUGFS_ADD(usb_rx_pool_urbs);
	DEBUGFS_ADD(usb_rx_anch_urbs);
	DEBUGFS_ADD(usb_rx_work_urbs);

	DEBUGFS_ADD(tx_total_queued);
	DEBUGFS_ADD(tx_total_pending);
	DEBUGFS_ADD(tx_dropped);
	DEBUGFS_ADD(tx_stuck);
	DEBUGFS_ADD(tx_ampdu_upload);
	DEBUGFS_ADD(tx_ampdu_scheduler);
	DEBUGFS_ADD(tx_ampdu_list_len);

	DEBUGFS_ADD(rx_dropped);
	DEBUGFS_ADD(sniffer_enabled);
	DEBUGFS_ADD(rx_software_decryption);

	DEBUGFS_ADD(mem_usage);
	DEBUGFS_ADD(qos_stat);
	DEBUGFS_ADD(sta_psm);
	DEBUGFS_ADD(ampdu_state);

	DEBUGFS_ADD(hw_tx_tally);
	DEBUGFS_ADD(hw_rx_tally);
	DEBUGFS_ADD(hw_phy_errors);
	DEBUGFS_ADD(phy_noise);

	DEBUGFS_ADD(hw_wlan_queue);
	DEBUGFS_ADD(hw_pta_queue);
	DEBUGFS_ADD(hw_ampdu_info);

	DEBUGFS_ADD(ampdu_density);
	DEBUGFS_ADD(ampdu_factor);

	DEBUGFS_ADD(tx_janitor_last_run);

	DEBUGFS_ADD(tx_status_0);
	DEBUGFS_ADD(tx_status_1);
	DEBUGFS_ADD(tx_status_2);
	DEBUGFS_ADD(tx_status_3);

	DEBUGFS_ADD(tx_pending_0);
	DEBUGFS_ADD(tx_pending_1);
	DEBUGFS_ADD(tx_pending_2);
	DEBUGFS_ADD(tx_pending_3);

	DEBUGFS_ADD(hw_ioread32);
	DEBUGFS_ADD(hw_iowrite32);
	DEBUGFS_ADD(bug);

	DEBUGFS_ADD(erp);

	DEBUGFS_ADD(vif_dump);

	DEBUGFS_ADD(beacon_int);
	DEBUGFS_ADD(pretbtt);

#undef DEBUGFS_ADD
}
898
/* remove the whole per-device debugfs tree created at registration */
void carl9170_debugfs_unregister(struct ar9170 *ar)
{
	debugfs_remove_recursive(ar->debug_dir);
}
diff --git a/drivers/net/wireless/ath/carl9170/debug.h b/drivers/net/wireless/ath/carl9170/debug.h
new file mode 100644
index 000000000000..ea4b97524122
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/debug.h
@@ -0,0 +1,134 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * debug header
5 *
6 * Copyright 2010, Christian Lamparter <chunkeey@googlemail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __DEBUG_H
39#define __DEBUG_H
40
41#include "eeprom.h"
42#include "wlan.h"
43#include "hw.h"
44#include "fwdesc.h"
45#include "fwcmd.h"
46#include "../regd.h"
47
/* maps one hardware statistics register address to a printable name */
struct hw_stat_reg_entry {
	u32 reg;	/* register address */
	char nreg[32];	/* human readable register name */
};

/* table-initializer shorthands: register address plus stringified name */
#define STAT_MAC_REG(reg)	\
	{ (AR9170_MAC_REG_##reg), #reg }

#define STAT_PTA_REG(reg)	\
	{ (AR9170_PTA_REG_##reg), #reg }

#define STAT_USB_REG(reg)	\
	{ (AR9170_USB_REG_##reg), #reg }
61
/* rx error/frame counters polled for the "hw_rx_tally" debugfs file */
static const struct hw_stat_reg_entry hw_rx_tally_regs[] = {
	STAT_MAC_REG(RX_CRC32),		STAT_MAC_REG(RX_CRC16),
	STAT_MAC_REG(RX_TIMEOUT_COUNT),	STAT_MAC_REG(RX_ERR_DECRYPTION_UNI),
	STAT_MAC_REG(RX_ERR_DECRYPTION_MUL), STAT_MAC_REG(RX_MPDU),
	STAT_MAC_REG(RX_DROPPED_MPDU),	STAT_MAC_REG(RX_DEL_MPDU),
};

/* PHY error counters for the "hw_phy_errors" debugfs file */
static const struct hw_stat_reg_entry hw_phy_errors_regs[] = {
	STAT_MAC_REG(RX_PHY_MISC_ERROR), STAT_MAC_REG(RX_PHY_XR_ERROR),
	STAT_MAC_REG(RX_PHY_OFDM_ERROR), STAT_MAC_REG(RX_PHY_CCK_ERROR),
	STAT_MAC_REG(RX_PHY_HT_ERROR), STAT_MAC_REG(RX_PHY_TOTAL),
};

/* tx counters for the "hw_tx_tally" debugfs file */
static const struct hw_stat_reg_entry hw_tx_tally_regs[] = {
	STAT_MAC_REG(TX_TOTAL),		STAT_MAC_REG(TX_UNDERRUN),
	STAT_MAC_REG(TX_RETRY),
};

/* wlan DMA queue state registers for the "hw_wlan_queue" file */
static const struct hw_stat_reg_entry hw_wlan_queue_regs[] = {
	STAT_MAC_REG(DMA_STATUS),	STAT_MAC_REG(DMA_TRIGGER),
	STAT_MAC_REG(DMA_TXQ0_ADDR),	STAT_MAC_REG(DMA_TXQ0_CURR_ADDR),
	STAT_MAC_REG(DMA_TXQ1_ADDR),	STAT_MAC_REG(DMA_TXQ1_CURR_ADDR),
	STAT_MAC_REG(DMA_TXQ2_ADDR),	STAT_MAC_REG(DMA_TXQ2_CURR_ADDR),
	STAT_MAC_REG(DMA_TXQ3_ADDR),	STAT_MAC_REG(DMA_TXQ3_CURR_ADDR),
	STAT_MAC_REG(DMA_RXQ_ADDR),	STAT_MAC_REG(DMA_RXQ_CURR_ADDR),
};

/* A-MPDU configuration registers for the "hw_ampdu_info" file */
static const struct hw_stat_reg_entry hw_ampdu_info_regs[] = {
	STAT_MAC_REG(AMPDU_DENSITY),	STAT_MAC_REG(AMPDU_FACTOR),
};

/* PTA (host interface) DMA state registers for the "hw_pta_queue" file */
static const struct hw_stat_reg_entry hw_pta_queue_regs[] = {
	STAT_PTA_REG(DN_CURR_ADDRH),	STAT_PTA_REG(DN_CURR_ADDRL),
	STAT_PTA_REG(UP_CURR_ADDRH),	STAT_PTA_REG(UP_CURR_ADDRL),
	STAT_PTA_REG(DMA_STATUS),	STAT_PTA_REG(DMA_MODE_CTRL),
};
98
/*
 * Declares the per-table statistics storage: <name>_sum[] keeps the
 * running totals, <name>_counter[] the most recent snapshot.
 */
#define DEFINE_TALLY(name)					\
	u32 name##_sum[ARRAY_SIZE(name##_regs)],		\
	    name##_counter[ARRAY_SIZE(name##_regs)]		\

/* counter-only variant for plain register-snapshot files */
#define DEFINE_STAT(name)					\
	u32 name##_counter[ARRAY_SIZE(name##_regs)]		\

/* aggregated storage for all hardware statistics tables above */
struct ath_stats {
	DEFINE_TALLY(hw_tx_tally);
	DEFINE_TALLY(hw_rx_tally);
	DEFINE_TALLY(hw_phy_errors);
	DEFINE_STAT(hw_wlan_queue);
	DEFINE_STAT(hw_pta_queue);
	DEFINE_STAT(hw_ampdu_info);
};

/* one "address = value" entry of the hw_ioread32 result ring */
struct carl9170_debug_mem_rbe {
	u32 reg;
	u32 value;
};

#define CARL9170_DEBUG_RING_SIZE	64

/* all debugfs-related per-device state, embedded in struct ar9170 */
struct carl9170_debug {
	struct ath_stats stats;
	/* ring buffer carrying hw_ioread32 results to the read handler */
	struct carl9170_debug_mem_rbe ring[CARL9170_DEBUG_RING_SIZE];
	struct mutex ring_lock;
	unsigned int ring_head, ring_tail;
	struct delayed_work update_tally;
};
129
130struct ar9170;
131
132void carl9170_debugfs_register(struct ar9170 *ar);
133void carl9170_debugfs_unregister(struct ar9170 *ar);
134#endif /* __DEBUG_H */
diff --git a/drivers/net/wireless/ath/carl9170/eeprom.h b/drivers/net/wireless/ath/carl9170/eeprom.h
new file mode 100644
index 000000000000..7cff40ac7759
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/eeprom.h
@@ -0,0 +1,216 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * EEPROM layout
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38#ifndef __CARL9170_SHARED_EEPROM_H
39#define __CARL9170_SHARED_EEPROM_H
40
/* offset of the EEPROM data within the device's address space */
#define AR9170_EEPROM_START		0x1600

#define AR5416_MAX_CHAINS		2
#define AR5416_MODAL_SPURS		5

/*
 * Per-band ("modal") calibration header, one instance per band in
 * struct ar9170_eeprom.  The layout is fixed by the on-device EEPROM
 * format (AR5416 heritage), hence __packed; field meanings follow the
 * Atheros naming and are not interpreted here.
 */
struct ar9170_eeprom_modal {
	__le32	antCtrlChain[AR5416_MAX_CHAINS];
	__le32	antCtrlCommon;
	s8	antennaGainCh[AR5416_MAX_CHAINS];
	u8	switchSettling;
	u8	txRxAttenCh[AR5416_MAX_CHAINS];
	u8	rxTxMarginCh[AR5416_MAX_CHAINS];
	s8	adcDesiredSize;
	s8	pgaDesiredSize;
	u8	xlnaGainCh[AR5416_MAX_CHAINS];
	u8	txEndToXpaOff;
	u8	txEndToRxOn;
	u8	txFrameToXpaOn;
	u8	thresh62;
	s8	noiseFloorThreshCh[AR5416_MAX_CHAINS];
	u8	xpdGain;
	u8	xpd;
	s8	iqCalICh[AR5416_MAX_CHAINS];
	s8	iqCalQCh[AR5416_MAX_CHAINS];
	u8	pdGainOverlap;
	u8	ob;
	u8	db;
	u8	xpaBiasLvl;
	u8	pwrDecreaseFor2Chain;
	u8	pwrDecreaseFor3Chain;
	u8	txFrameToDataStart;
	u8	txFrameToPaOn;
	u8	ht40PowerIncForPdadc;
	u8	bswAtten[AR5416_MAX_CHAINS];
	u8	bswMargin[AR5416_MAX_CHAINS];
	u8	swSettleHt40;
	u8	reserved[22];
	/* known spur frequencies and their suppression ranges */
	struct spur_channel {
		__le16	spurChan;
		u8	spurRangeLow;
		u8	spurRangeHigh;
	} __packed spur_channels[AR5416_MODAL_SPURS];
} __packed;
84
#define AR5416_NUM_PD_GAINS		4
#define AR5416_PD_GAIN_ICEPTS		5

/* power-detector calibration curve (power/Vpd intercepts) per pier */
struct ar9170_calibration_data_per_freq {
	u8	pwr_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
	u8	vpd_pdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
} __packed;

#define AR5416_NUM_5G_CAL_PIERS		8
#define AR5416_NUM_2G_CAL_PIERS		4

#define AR5416_NUM_5G_TARGET_PWRS	8
#define AR5416_NUM_2G_CCK_TARGET_PWRS	3
#define AR5416_NUM_2G_OFDM_TARGET_PWRS	4
#define AR5416_MAX_NUM_TGT_PWRS		8

/* target output power for legacy (non-HT) rates at one frequency */
struct ar9170_calibration_target_power_legacy {
	u8	freq;
	u8	power[4];
} __packed;

/* target output power for HT rates at one frequency */
struct ar9170_calibration_target_power_ht {
	u8	freq;
	u8	power[8];
} __packed;

#define AR5416_NUM_CTLS			24

/* one conformance-test-limit band edge: channel plus power/flag byte */
struct ar9170_calctl_edges {
	u8	channel;
#define AR9170_CALCTL_EDGE_FLAGS	0xC0
	u8	power_flags;
} __packed;

#define AR5416_NUM_BAND_EDGES		8

/* per-CTL band-edge table, one row per chain */
struct ar9170_calctl_data {
	struct ar9170_calctl_edges
		control_edges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
} __packed;
125
/*
 * Complete on-device EEPROM image, starting at AR9170_EEPROM_START.
 * All multi-byte fields are little-endian and the struct is __packed
 * to mirror the raw EEPROM layout exactly.
 */
struct ar9170_eeprom {
	__le16	length;		/* total EEPROM data length */
	__le16	checksum;
	__le16	version;
	u8	operating_flags;	/* supported bands, see below */
#define AR9170_OPFLAG_5GHZ		1
#define AR9170_OPFLAG_2GHZ		2
	u8	misc;
	__le16	reg_domain[2];
	u8	mac_address[6];
	u8	rx_mask;	/* rx chain mask */
	u8	tx_mask;	/* tx chain mask */
	__le16	rf_silent;
	__le16	bluetooth_options;
	__le16	device_capabilities;
	__le32	build_number;
	u8	deviceType;
	u8	reserved[33];

	u8	customer_data[64];

	/* per-band calibration headers: [0] = 5 GHz, [1] = 2.4 GHz
	 * NOTE(review): index/band mapping inferred from the 5G/2G field
	 * ordering below — confirm against the parser before relying on it */
	struct ar9170_eeprom_modal
		modal_header[2];

	u8	cal_freq_pier_5G[AR5416_NUM_5G_CAL_PIERS];
	u8	cal_freq_pier_2G[AR5416_NUM_2G_CAL_PIERS];

	struct ar9170_calibration_data_per_freq
		cal_pier_data_5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS],
		cal_pier_data_2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS];

	/* power calibration data */
	struct ar9170_calibration_target_power_legacy
		cal_tgt_pwr_5G[AR5416_NUM_5G_TARGET_PWRS];
	struct ar9170_calibration_target_power_ht
		cal_tgt_pwr_5G_ht20[AR5416_NUM_5G_TARGET_PWRS],
		cal_tgt_pwr_5G_ht40[AR5416_NUM_5G_TARGET_PWRS];

	struct ar9170_calibration_target_power_legacy
		cal_tgt_pwr_2G_cck[AR5416_NUM_2G_CCK_TARGET_PWRS],
		cal_tgt_pwr_2G_ofdm[AR5416_NUM_2G_OFDM_TARGET_PWRS];
	struct ar9170_calibration_target_power_ht
		cal_tgt_pwr_2G_ht20[AR5416_NUM_2G_OFDM_TARGET_PWRS],
		cal_tgt_pwr_2G_ht40[AR5416_NUM_2G_OFDM_TARGET_PWRS];

	/* conformance testing limits */
	u8	ctl_index[AR5416_NUM_CTLS];
	struct ar9170_calctl_data
		ctl_data[AR5416_NUM_CTLS];

	u8	pad;
	__le16	subsystem_id;
} __packed;
179
/* LED behaviour bits of the per-LED EEPROM configuration word */
#define AR9170_LED_MODE_POWER_ON		0x0001
#define AR9170_LED_MODE_RESERVED		0x0002
#define AR9170_LED_MODE_DISABLE_STATE		0x0004
#define AR9170_LED_MODE_OFF_IN_PSM		0x0008

/* AR9170_LED_MODE BIT is set */
#define AR9170_LED_MODE_FREQUENCY_S		4
#define AR9170_LED_MODE_FREQUENCY		0x0030
#define AR9170_LED_MODE_FREQUENCY_1HZ		0x0000
#define AR9170_LED_MODE_FREQUENCY_0_5HZ		0x0010
#define AR9170_LED_MODE_FREQUENCY_0_25HZ	0x0020
#define AR9170_LED_MODE_FREQUENCY_0_125HZ	0x0030

/* AR9170_LED_MODE BIT is not set */
#define AR9170_LED_MODE_CONN_STATE_S		4
#define AR9170_LED_MODE_CONN_STATE		0x0030
#define AR9170_LED_MODE_CONN_STATE_FORCE_OFF	0x0000
#define AR9170_LED_MODE_CONN_STATE_FORCE_ON	0x0010
/* Idle off / Active on */
#define AR9170_LED_MODE_CONN_STATE_IOFF_AON	0x0020
/* Idle on / Active off
 * NOTE(review): this shares value 0x0010 with _FORCE_ON above, leaving
 * 0x0030 unused in the field — looks suspicious; verify against the
 * vendor EEPROM documentation before changing. */
#define AR9170_LED_MODE_CONN_STATE_ION_AOFF	0x0010

#define AR9170_LED_MODE_MODE			0x0040
#define AR9170_LED_MODE_RESERVED2		0x0080

#define AR9170_LED_MODE_TON_SCAN_S		8
#define AR9170_LED_MODE_TON_SCAN		0x0f00

#define AR9170_LED_MODE_TOFF_SCAN_S		12
#define AR9170_LED_MODE_TOFF_SCAN		0xf000

/* one per-LED configuration word as stored in the EEPROM */
struct ar9170_led_mode {
	__le16 led;
};
215
216#endif /* __CARL9170_SHARED_EEPROM_H */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
new file mode 100644
index 000000000000..ae6c006bbc56
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -0,0 +1,402 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * firmware parser
5 *
6 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 */
22
23#include <linux/kernel.h>
24#include <linux/firmware.h>
25#include <linux/crc32.h>
26#include "carl9170.h"
27#include "fwcmd.h"
28#include "version.h"
29
30#define MAKE_STR(symbol) #symbol
31#define TO_STR(symbol) MAKE_STR(symbol)
32#define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER)
33MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT);
34
35static const u8 otus_magic[4] = { OTUS_MAGIC };
36
/*
 * Look up a descriptor in the firmware's descriptor chain.
 *
 * @descid: four-byte magic of the wanted descriptor
 * @len: minimum acceptable descriptor length
 * @compatible_revision: descriptor revision the caller can handle
 *
 * Returns a pointer into the firmware blob, or NULL when no
 * matching descriptor exists.
 */
static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
	const unsigned int len, const u8 compatible_revision)
{
	const struct carl9170fw_desc_head *iter;

	carl9170fw_for_each_hdr(iter, ar->fw.desc) {
		if (carl9170fw_desc_cmp(iter, descid, len,
					compatible_revision))
			return (void *)iter;
	}

	/*
	 * carl9170fw_for_each_hdr() stops *at* the terminating LAST
	 * descriptor without visiting it, so it must be checked
	 * separately in case the caller asked for LAST_MAGIC itself.
	 */
	if (carl9170fw_desc_cmp(iter, descid, len,
				compatible_revision))
		return (void *)iter;

	return NULL;
}
55
/*
 * Walk the descriptor chain and validate that every header lies
 * completely inside the buffer and that the chain is terminated by
 * a LAST descriptor.
 *
 * Returns 0 when a valid LAST descriptor was found, otherwise a
 * negative errno describing the first inconsistency.
 */
static int carl9170_fw_verify_descs(struct ar9170 *ar,
	const struct carl9170fw_desc_head *head, unsigned int max_len)
{
	const struct carl9170fw_desc_head *pos;
	unsigned long pos_addr, end_addr;
	unsigned int pos_length;

	/* not even enough room for a single descriptor header */
	if (max_len < sizeof(*pos))
		return -ENODATA;

	/* never scan past the defined maximum descriptor area */
	max_len = min_t(unsigned int, CARL9170FW_DESC_MAX_LENGTH, max_len);

	pos = head;
	pos_addr = (unsigned long) pos;
	end_addr = pos_addr + max_len;

	while (pos_addr < end_addr) {
		/* the header itself must fit into the remaining space */
		if (pos_addr + sizeof(*head) > end_addr)
			return -E2BIG;

		pos_length = le16_to_cpu(pos->length);

		/* a descriptor can never be smaller than its header */
		if (pos_length < sizeof(*head))
			return -EBADMSG;

		if (pos_length > max_len)
			return -EOVERFLOW;

		/* the descriptor body must not run past the buffer end */
		if (pos_addr + pos_length > end_addr)
			return -EMSGSIZE;

		/* the LAST descriptor terminates the chain => success */
		if (carl9170fw_desc_cmp(pos, LAST_MAGIC,
					CARL9170FW_LAST_DESC_SIZE,
					CARL9170FW_LAST_DESC_CUR_VER))
			return 0;

		/* advance to the next descriptor header */
		pos_addr += pos_length;
		pos = (void *)pos_addr;
		max_len -= pos_length;
	}
	/* ran out of data without seeing a LAST descriptor */
	return -EINVAL;
}
98
/*
 * Print driver and firmware version information and export the
 * firmware release string via wiphy->fw_version.
 */
static void carl9170_fw_info(struct ar9170 *ar)
{
	const struct carl9170fw_motd_desc *motd_desc;
	unsigned int str_ver_len;
	u32 fw_date;

	/* "2%03d" prints the year offset, e.g. 10 => "2010" */
	dev_info(&ar->udev->dev, "driver API: %s 2%03d-%02d-%02d [%d-%d]\n",
		CARL9170FW_VERSION_GIT, CARL9170FW_VERSION_YEAR,
		CARL9170FW_VERSION_MONTH, CARL9170FW_VERSION_DAY,
		CARL9170FW_API_MIN_VER, CARL9170FW_API_MAX_VER);

	/* the MOTD descriptor is optional */
	motd_desc = carl9170_fw_find_desc(ar, MOTD_MAGIC,
		sizeof(*motd_desc), CARL9170FW_MOTD_DESC_CUR_VER);

	if (motd_desc) {
		/* release[] may not be NUL-terminated; bound the length */
		str_ver_len = strnlen(motd_desc->release,
			CARL9170FW_MOTD_RELEASE_LEN);

		fw_date = le32_to_cpu(motd_desc->fw_year_month_day);

		dev_info(&ar->udev->dev, "firmware API: %.*s 2%03d-%02d-%02d\n",
			str_ver_len, motd_desc->release,
			CARL9170FW_GET_YEAR(fw_date),
			CARL9170FW_GET_MONTH(fw_date),
			CARL9170FW_GET_DAY(fw_date));

		strlcpy(ar->hw->wiphy->fw_version, motd_desc->release,
			sizeof(ar->hw->wiphy->fw_version));
	}
}
129
130static bool valid_dma_addr(const u32 address)
131{
132 if (address >= AR9170_SRAM_OFFSET &&
133 address < (AR9170_SRAM_OFFSET + AR9170_SRAM_SIZE))
134 return true;
135
136 return false;
137}
138
139static bool valid_cpu_addr(const u32 address)
140{
141 if (valid_dma_addr(address) || (address >= AR9170_PRAM_OFFSET &&
142 address < (AR9170_PRAM_OFFSET + AR9170_PRAM_SIZE)))
143 return true;
144
145 return false;
146}
147
148static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
149{
150 const struct carl9170fw_otus_desc *otus_desc;
151 const struct carl9170fw_chk_desc *chk_desc;
152 const struct carl9170fw_last_desc *last_desc;
153
154 last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
155 sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
156 if (!last_desc)
157 return -EINVAL;
158
159 otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
160 sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
161 if (!otus_desc) {
162 dev_err(&ar->udev->dev, "failed to find compatible firmware "
163 "descriptor.\n");
164 return -ENODATA;
165 }
166
167 chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
168 sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
169
170 if (chk_desc) {
171 unsigned long fin, diff;
172 unsigned int dsc_len;
173 u32 crc32;
174
175 dsc_len = min_t(unsigned int, len,
176 (unsigned long)chk_desc - (unsigned long)otus_desc);
177
178 fin = (unsigned long) last_desc + sizeof(*last_desc);
179 diff = fin - (unsigned long) otus_desc;
180
181 if (diff < len)
182 len -= diff;
183
184 if (len < 256)
185 return -EIO;
186
187 crc32 = crc32_le(~0, data, len);
188 if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
189 dev_err(&ar->udev->dev, "fw checksum test failed.\n");
190 return -ENOEXEC;
191 }
192
193 crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
194 if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
195 dev_err(&ar->udev->dev, "descriptor check failed.\n");
196 return -EINVAL;
197 }
198 } else {
199 dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
200 }
201
202#define SUPP(feat) \
203 (carl9170fw_supports(otus_desc->feature_set, feat))
204
205 if (!SUPP(CARL9170FW_DUMMY_FEATURE)) {
206 dev_err(&ar->udev->dev, "invalid firmware descriptor "
207 "format detected.\n");
208 return -EINVAL;
209 }
210
211 ar->fw.api_version = otus_desc->api_ver;
212
213 if (ar->fw.api_version < CARL9170FW_API_MIN_VER ||
214 ar->fw.api_version > CARL9170FW_API_MAX_VER) {
215 dev_err(&ar->udev->dev, "unsupported firmware api version.\n");
216 return -EINVAL;
217 }
218
219 if (!SUPP(CARL9170FW_COMMAND_PHY) || SUPP(CARL9170FW_UNUSABLE) ||
220 !SUPP(CARL9170FW_HANDLE_BACK_REQ)) {
221 dev_err(&ar->udev->dev, "firmware does support "
222 "mandatory features.\n");
223 return -ECANCELED;
224 }
225
226 if (ilog2(le32_to_cpu(otus_desc->feature_set)) >=
227 __CARL9170FW_FEATURE_NUM) {
228 dev_warn(&ar->udev->dev, "driver does not support all "
229 "firmware features.\n");
230 }
231
232 if (!SUPP(CARL9170FW_COMMAND_CAM)) {
233 dev_info(&ar->udev->dev, "crypto offloading is disabled "
234 "by firmware.\n");
235 ar->disable_offload = true;
236 }
237
238 if (SUPP(CARL9170FW_PSM))
239 ar->hw->flags |= IEEE80211_HW_SUPPORTS_PS;
240
241 if (!SUPP(CARL9170FW_USB_INIT_FIRMWARE)) {
242 dev_err(&ar->udev->dev, "firmware does not provide "
243 "mandatory interfaces.\n");
244 return -EINVAL;
245 }
246
247 if (SUPP(CARL9170FW_MINIBOOT))
248 ar->fw.offset = le16_to_cpu(otus_desc->miniboot_size);
249 else
250 ar->fw.offset = 0;
251
252 if (SUPP(CARL9170FW_USB_DOWN_STREAM)) {
253 ar->hw->extra_tx_headroom += sizeof(struct ar9170_stream);
254 ar->fw.tx_stream = true;
255 }
256
257 if (SUPP(CARL9170FW_USB_UP_STREAM))
258 ar->fw.rx_stream = true;
259
260 if (SUPP(CARL9170FW_RX_FILTER)) {
261 ar->fw.rx_filter = true;
262 ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
263 FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS |
264 FIF_PROMISC_IN_BSS;
265 }
266
267 ar->fw.vif_num = otus_desc->vif_num;
268 ar->fw.cmd_bufs = otus_desc->cmd_bufs;
269 ar->fw.address = le32_to_cpu(otus_desc->fw_address);
270 ar->fw.rx_size = le16_to_cpu(otus_desc->rx_max_frame_len);
271 ar->fw.mem_blocks = min_t(unsigned int, otus_desc->tx_descs, 0xfe);
272 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
273 ar->fw.mem_block_size = le16_to_cpu(otus_desc->tx_frag_len);
274
275 if (ar->fw.vif_num >= AR9170_MAX_VIRTUAL_MAC || !ar->fw.vif_num ||
276 ar->fw.mem_blocks < 16 || !ar->fw.cmd_bufs ||
277 ar->fw.mem_block_size < 64 || ar->fw.mem_block_size > 512 ||
278 ar->fw.rx_size > 32768 || ar->fw.rx_size < 4096 ||
279 !valid_cpu_addr(ar->fw.address)) {
280 dev_err(&ar->udev->dev, "firmware shows obvious signs of "
281 "malicious tampering.\n");
282 return -EINVAL;
283 }
284
285 ar->fw.beacon_addr = le32_to_cpu(otus_desc->bcn_addr);
286 ar->fw.beacon_max_len = le16_to_cpu(otus_desc->bcn_len);
287
288 if (valid_dma_addr(ar->fw.beacon_addr) && ar->fw.beacon_max_len >=
289 AR9170_MAC_BCN_LENGTH_MAX) {
290 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
291
292 if (SUPP(CARL9170FW_WLANTX_CAB)) {
293 ar->hw->wiphy->interface_modes |=
294 BIT(NL80211_IFTYPE_AP);
295 }
296 }
297
298#undef SUPPORTED
299 return 0;
300}
301
302static struct carl9170fw_desc_head *
303carl9170_find_fw_desc(struct ar9170 *ar, const __u8 *fw_data, const size_t len)
304
305{
306 int scan = 0, found = 0;
307
308 if (!carl9170fw_size_check(len)) {
309 dev_err(&ar->udev->dev, "firmware size is out of bound.\n");
310 return NULL;
311 }
312
313 while (scan < len - sizeof(struct carl9170fw_desc_head)) {
314 if (fw_data[scan++] == otus_magic[found])
315 found++;
316 else
317 found = 0;
318
319 if (scan >= len)
320 break;
321
322 if (found == sizeof(otus_magic))
323 break;
324 }
325
326 if (found != sizeof(otus_magic))
327 return NULL;
328
329 return (void *)&fw_data[scan - found];
330}
331
/*
 * Apply firmware-provided EEPROM overrides ("FIX" descriptor).
 *
 * Each fix entry patches one 32-bit word of the cached EEPROM image:
 * word = (word & mask) | value. Returns 0; a missing FIX descriptor
 * simply means there is nothing to patch.
 *
 * NOTE(review): data[] aliases the raw EEPROM image while mask/value
 * are converted to CPU byte order -- verify this on big-endian hosts.
 */
int carl9170_fw_fix_eeprom(struct ar9170 *ar)
{
	const struct carl9170fw_fix_desc *fix_desc = NULL;
	unsigned int i, n, off;
	u32 *data = (void *)&ar->eeprom;

	fix_desc = carl9170_fw_find_desc(ar, FIX_MAGIC,
		sizeof(*fix_desc), CARL9170FW_FIX_DESC_CUR_VER);

	if (!fix_desc)
		return 0;

	/* number of fix entries following the descriptor header */
	n = (le16_to_cpu(fix_desc->head.length) - sizeof(*fix_desc)) /
		sizeof(struct carl9170fw_fix_entry);

	for (i = 0; i < n; i++) {
		off = le32_to_cpu(fix_desc->data[i].address) -
			AR9170_EEPROM_START;

		/* entries must be word-aligned and inside the EEPROM */
		if (off >= sizeof(struct ar9170_eeprom) || (off & 3)) {
			dev_err(&ar->udev->dev, "Skip invalid entry %d\n", i);
			continue;
		}

		data[off / sizeof(*data)] &=
			le32_to_cpu(fix_desc->data[i].mask);
		data[off / sizeof(*data)] |=
			le32_to_cpu(fix_desc->data[i].value);
	}

	return 0;
}
364
365int carl9170_parse_firmware(struct ar9170 *ar)
366{
367 const struct carl9170fw_desc_head *fw_desc = NULL;
368 const struct firmware *fw = ar->fw.fw;
369 unsigned long header_offset = 0;
370 int err;
371
372 if (WARN_ON(!fw))
373 return -EINVAL;
374
375 fw_desc = carl9170_find_fw_desc(ar, fw->data, fw->size);
376
377 if (!fw_desc) {
378 dev_err(&ar->udev->dev, "unsupported firmware.\n");
379 return -ENODATA;
380 }
381
382 header_offset = (unsigned long)fw_desc - (unsigned long)fw->data;
383
384 err = carl9170_fw_verify_descs(ar, fw_desc, fw->size - header_offset);
385 if (err) {
386 dev_err(&ar->udev->dev, "damaged firmware (%d).\n", err);
387 return err;
388 }
389
390 ar->fw.desc = fw_desc;
391
392 carl9170_fw_info(ar);
393
394 err = carl9170_fw(ar, fw->data, fw->size);
395 if (err) {
396 dev_err(&ar->udev->dev, "failed to parse firmware (%d).\n",
397 err);
398 return err;
399 }
400
401 return 0;
402}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
new file mode 100644
index 000000000000..d552166db505
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -0,0 +1,284 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * Firmware command interface definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#ifndef __CARL9170_SHARED_FWCMD_H
40#define __CARL9170_SHARED_FWCMD_H
41
/* maximum size of one command: 4-byte header plus payload */
#define	CARL9170_MAX_CMD_LEN		64
#define	CARL9170_MAX_CMD_PAYLOAD_LEN	60

/* range of firmware interface versions this driver can talk to */
#define CARL9170FW_API_MIN_VER		1
#define CARL9170FW_API_MAX_VER		1

/* command/response identifiers carried in carl9170_cmd_head::cmd */
enum carl9170_cmd_oids {
	CARL9170_CMD_RREG		= 0x00,
	CARL9170_CMD_WREG		= 0x01,
	CARL9170_CMD_ECHO		= 0x02,
	CARL9170_CMD_SWRST		= 0x03,
	CARL9170_CMD_REBOOT		= 0x04,
	CARL9170_CMD_BCN_CTRL		= 0x05,
	CARL9170_CMD_READ_TSF		= 0x06,
	CARL9170_CMD_RX_FILTER		= 0x07,

	/* CAM */
	CARL9170_CMD_EKEY		= 0x10,
	CARL9170_CMD_DKEY		= 0x11,

	/* RF / PHY */
	CARL9170_CMD_FREQUENCY		= 0x20,
	CARL9170_CMD_RF_INIT		= 0x21,
	CARL9170_CMD_SYNTH		= 0x22,
	CARL9170_CMD_FREQ_START		= 0x23,
	CARL9170_CMD_PSM		= 0x24,

	/* Asynchronous command flag */
	CARL9170_CMD_ASYNC_FLAG		= 0x40,
	CARL9170_CMD_WREG_ASYNC		= (CARL9170_CMD_WREG |
					   CARL9170_CMD_ASYNC_FLAG),
	CARL9170_CMD_REBOOT_ASYNC	= (CARL9170_CMD_REBOOT |
					   CARL9170_CMD_ASYNC_FLAG),
	CARL9170_CMD_BCN_CTRL_ASYNC	= (CARL9170_CMD_BCN_CTRL |
					   CARL9170_CMD_ASYNC_FLAG),
	CARL9170_CMD_PSM_ASYNC		= (CARL9170_CMD_PSM |
					   CARL9170_CMD_ASYNC_FLAG),

	/* responses and traps */
	CARL9170_RSP_FLAG		= 0xc0,
	CARL9170_RSP_PRETBTT		= 0xc0,
	CARL9170_RSP_TXCOMP		= 0xc1,
	CARL9170_RSP_BEACON_CONFIG	= 0xc2,
	CARL9170_RSP_ATIM		= 0xc3,
	CARL9170_RSP_WATCHDOG		= 0xc6,
	CARL9170_RSP_TEXT		= 0xca,
	CARL9170_RSP_HEXDUMP		= 0xcc,
	CARL9170_RSP_RADAR		= 0xcd,
	CARL9170_RSP_GPIO		= 0xce,
	CARL9170_RSP_BOOT		= 0xcf,
};
93
/* key programming command payload (firmware wire format) */
struct carl9170_set_key_cmd {
	__le16		user;
	__le16		keyId;
	__le16		type;
	u8		macAddr[6];
	/* NOTE(review): raw firmware ABI words, no __le32 -- confirm
	 * that byte swapping is handled (or unneeded) by the caller. */
	u32		key[4];
} __packed;
#define CARL9170_SET_KEY_CMD_SIZE		28

/* key removal command payload */
struct carl9170_disable_key_cmd {
	__le16		user;
	__le16		padding;
} __packed;
#define CARL9170_DISABLE_KEY_CMD_SIZE		4

/* variable-length list of 32-bit values */
struct carl9170_u32_list {
	u32	vals[0];
} __packed;

/* variable-length list of register addresses (read requests) */
struct carl9170_reg_list {
	__le32		regs[0];
} __packed;

/* variable-length list of address/value pairs (write requests) */
struct carl9170_write_reg {
	struct {
		__le32		addr;
		__le32		val;
	} regs[0] __packed;
} __packed;

/* flags for carl9170_rf_init::ht_settings */
#define	CARL9170FW_PHY_HT_ENABLE		0x4
#define	CARL9170FW_PHY_HT_DYN2040		0x8
#define	CARL9170FW_PHY_HT_EXT_CHAN_OFF		0x3
#define	CARL9170FW_PHY_HT_EXT_CHAN_OFF_S	2

/* RF/PHY initialization parameters */
struct carl9170_rf_init {
	__le32		freq;
	u8		ht_settings;
	u8		padding2[3];
	__le32		delta_slope_coeff_exp;
	__le32		delta_slope_coeff_man;
	__le32		delta_slope_coeff_exp_shgi;
	__le32		delta_slope_coeff_man_shgi;
	__le32		finiteLoopCount;
} __packed;
#define CARL9170_RF_INIT_SIZE		28

struct carl9170_rf_init_result {
	__le32		ret;		/* AR9170_PHY_REG_AGC_CONTROL */
} __packed;
#define CARL9170_RF_INIT_RESULT_SIZE	4

/* power-save state word encoding */
#define	CARL9170_PSM_SLEEP		0x1000
#define	CARL9170_PSM_SOFTWARE		0
#define	CARL9170_PSM_WAKE		0 /* internally used. */
#define	CARL9170_PSM_COUNTER		0xfff
#define	CARL9170_PSM_COUNTER_S		0

struct carl9170_psm {
	__le32		state;
} __packed;
#define CARL9170_PSM_SIZE		4

/* bitmap of CARL9170_RX_FILTER_* flags defined below */
struct carl9170_rx_filter_cmd {
	__le32		rx_filter;
} __packed;
#define CARL9170_RX_FILTER_CMD_SIZE	4

#define CARL9170_RX_FILTER_BAD		0x01
#define CARL9170_RX_FILTER_OTHER_RA	0x02
#define CARL9170_RX_FILTER_DECRY_FAIL	0x04
#define CARL9170_RX_FILTER_CTL_OTHER	0x08
#define CARL9170_RX_FILTER_CTL_PSPOLL	0x10
#define CARL9170_RX_FILTER_CTL_BACKR	0x20
#define CARL9170_RX_FILTER_MGMT		0x40
#define CARL9170_RX_FILTER_DATA		0x80

/* beacon buffer location and operating mode */
struct carl9170_bcn_ctrl_cmd {
	__le32		vif_id;
	__le32		mode;
	__le32		bcn_addr;
	__le32		bcn_len;
} __packed;
#define CARL9170_BCN_CTRL_CMD_SIZE	16

/* values for carl9170_bcn_ctrl_cmd::mode */
#define CARL9170_BCN_CTRL_DRAIN	0
#define CARL9170_BCN_CTRL_CAB_TRIGGER	1
181
/*
 * Common 4-byte header shared by commands and responses. The union
 * also exposes the header as one 32-bit word (hdr_data) so it can be
 * copied or compared in a single access.
 */
struct carl9170_cmd_head {
	union {
		struct {
			u8	len;
			u8	cmd;	/* enum carl9170_cmd_oids */
			u8	seq;
			u8	ext;
		} __packed;

		u32 hdr_data;
	} __packed;
} __packed;

/* host -> firmware command: header plus per-command payload union */
struct carl9170_cmd {
	struct carl9170_cmd_head hdr;
	union {
		struct carl9170_set_key_cmd	setkey;
		struct carl9170_disable_key_cmd	disablekey;
		struct carl9170_u32_list	echo;
		struct carl9170_reg_list	rreg;
		struct carl9170_write_reg	wreg;
		struct carl9170_rf_init		rf_init;
		struct carl9170_psm		psm;
		struct carl9170_bcn_ctrl_cmd	bcn_ctrl;
		struct carl9170_rx_filter_cmd	rx_filter;
		u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
	} __packed;
} __packed;
210
/* bitfields packed into _carl9170_tx_status::info */
#define	CARL9170_TX_STATUS_QUEUE	3
#define	CARL9170_TX_STATUS_QUEUE_S	0
#define	CARL9170_TX_STATUS_RIX_S	2
#define	CARL9170_TX_STATUS_RIX		(3 << CARL9170_TX_STATUS_RIX_S)
#define	CARL9170_TX_STATUS_TRIES_S	4
#define	CARL9170_TX_STATUS_TRIES	(7 << CARL9170_TX_STATUS_TRIES_S)
#define CARL9170_TX_STATUS_SUCCESS	0x80

/*
 * NOTE:
 * Both structs [carl9170_tx_status and _carl9170_tx_status]
 * need to be "bit for bit" in sync.
 */
struct carl9170_tx_status {
	/*
	 * Beware of compiler bugs in all gcc pre 4.4!
	 */

	u8 cookie;
	u8 queue:2;
	u8 rix:2;
	u8 tries:3;
	u8 success:1;
} __packed;
struct _carl9170_tx_status {
	/*
	 * This version should be immune to all alignment bugs.
	 */

	u8 cookie;
	u8 info;	/* see CARL9170_TX_STATUS_* masks above */
} __packed;
#define CARL9170_TX_STATUS_SIZE		2

/* number of tx status reports that fit into one response payload */
#define CARL9170_RSP_TX_STATUS_NUM	(CARL9170_MAX_CMD_PAYLOAD_LEN /	\
					 sizeof(struct _carl9170_tx_status))

#define CARL9170_TX_MAX_RATE_TRIES	7

#define CARL9170_TX_MAX_RATES		4
#define CARL9170_TX_MAX_RETRY_RATES	(CARL9170_TX_MAX_RATES - 1)
#define CARL9170_ERR_MAGIC		"ERR:"
#define CARL9170_BUG_MAGIC		"BUG:"

struct carl9170_gpio {
	__le32 gpio;
} __packed;
#define CARL9170_GPIO_SIZE		4

/* 64-bit TSF, also addressable as two 32-bit halves */
struct carl9170_tsf_rsp {
	union {
		__le32 tsf[2];
		__le64 tsf_64;
	} __packed;
} __packed;
#define CARL9170_TSF_RSP_SIZE		8

/* firmware -> host response or trap: header plus payload union */
struct carl9170_rsp {
	struct carl9170_cmd_head hdr;

	union {
		struct carl9170_rf_init_result	rf_init_res;
		struct carl9170_u32_list	rreg_res;
		struct carl9170_u32_list	echo;
		struct carl9170_tx_status	tx_status[0];
		struct _carl9170_tx_status	_tx_status[0];
		struct carl9170_gpio		gpio;
		struct carl9170_tsf_rsp		tsf;
		struct carl9170_psm		psm;
		u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
	} __packed;
} __packed;
283
284#endif /* __CARL9170_SHARED_FWCMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
new file mode 100644
index 000000000000..71f3821f6058
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -0,0 +1,241 @@
1/*
2 * Shared CARL9170 Header
3 *
4 * Firmware descriptor format
5 *
6 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; see the file COPYING. If not, see
19 * http://www.gnu.org/licenses/.
20 */
21
22#ifndef __CARL9170_SHARED_FWDESC_H
23#define __CARL9170_SHARED_FWDESC_H
24
/* NOTE: Don't mess with the order of the flags! */
/* Bit positions within carl9170fw_otus_desc::feature_set. */
enum carl9170fw_feature_list {
	/* Always set */
	CARL9170FW_DUMMY_FEATURE,

	/*
	 * Indicates that this image has special boot block which prevents
	 * legacy drivers to drive the firmware.
	 */
	CARL9170FW_MINIBOOT,

	/* usb registers are initialized by the firmware */
	CARL9170FW_USB_INIT_FIRMWARE,

	/* command traps & notifications are send through EP2 */
	CARL9170FW_USB_RESP_EP2,

	/* usb download (app -> fw) stream */
	CARL9170FW_USB_DOWN_STREAM,

	/* usb upload (fw -> app) stream */
	CARL9170FW_USB_UP_STREAM,

	/* unusable - reserved to flag non-functional debug firmwares */
	CARL9170FW_UNUSABLE,

	/* AR9170_CMD_RF_INIT, AR9170_CMD_FREQ_START, AR9170_CMD_FREQUENCY */
	CARL9170FW_COMMAND_PHY,

	/* AR9170_CMD_EKEY, AR9170_CMD_DKEY */
	CARL9170FW_COMMAND_CAM,

	/* Firmware has a software Content After Beacon Queueing mechanism */
	CARL9170FW_WLANTX_CAB,

	/* The firmware is capable of responding to incoming BAR frames */
	CARL9170FW_HANDLE_BACK_REQ,

	/* GPIO Interrupt | CARL9170_RSP_GPIO */
	CARL9170FW_GPIO_INTERRUPT,

	/* Firmware PSM support | CARL9170_CMD_PSM */
	CARL9170FW_PSM,

	/* Firmware RX filter | CARL9170_CMD_RX_FILTER */
	CARL9170FW_RX_FILTER,

	/* KEEP LAST */
	__CARL9170FW_FEATURE_NUM
};
75
/* four-byte magics identifying each descriptor type */
#define OTUS_MAGIC	"OTAR"
#define MOTD_MAGIC	"MOTD"
#define FIX_MAGIC	"FIX\0"
#define DBG_MAGIC	"DBG\0"
#define CHK_MAGIC	"CHK\0"
#define LAST_MAGIC	"LAST"

/*
 * Build-date packing: day (0..30), month (0..11, scaled by 31) and
 * year (offset from 2010, scaled by 372) share one integer.
 */
#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
#define CARL9170FW_SET_MONTH(m) ((((m) - 1) % 12) * 31)
#define CARL9170FW_SET_YEAR(y) (((y) - 10) * 372)

#define CARL9170FW_GET_DAY(d) (((d) % 31) + 1)
#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)

/* common header that starts every firmware descriptor */
struct carl9170fw_desc_head {
	u8	magic[4];
	__le16	length;		/* total descriptor size, header included */
	u8	min_ver;
	u8	cur_ver;
} __packed;
#define CARL9170FW_DESC_HEAD_SIZE			\
	(sizeof(struct carl9170fw_desc_head))

#define CARL9170FW_OTUS_DESC_MIN_VER		6
#define CARL9170FW_OTUS_DESC_CUR_VER		6
/* main capability descriptor: features, addresses and limits */
struct carl9170fw_otus_desc {
	struct carl9170fw_desc_head head;
	__le32 feature_set;	/* bitmap of enum carl9170fw_feature_list */
	__le32 fw_address;
	__le32 bcn_addr;
	__le16 bcn_len;
	__le16 miniboot_size;
	__le16 tx_frag_len;
	__le16 rx_max_frame_len;
	u8 tx_descs;
	u8 cmd_bufs;
	u8 api_ver;
	u8 vif_num;
} __packed;
#define CARL9170FW_OTUS_DESC_SIZE			\
	(sizeof(struct carl9170fw_otus_desc))

#define CARL9170FW_MOTD_STRING_LEN			24
#define CARL9170FW_MOTD_RELEASE_LEN			20
#define CARL9170FW_MOTD_DESC_MIN_VER			1
#define CARL9170FW_MOTD_DESC_CUR_VER			2
/* "message of the day": build date and release strings */
struct carl9170fw_motd_desc {
	struct carl9170fw_desc_head head;
	__le32 fw_year_month_day;	/* packed, see *_GET_DAY() et al. */
	char desc[CARL9170FW_MOTD_STRING_LEN];
	char release[CARL9170FW_MOTD_RELEASE_LEN]; /* may lack the NUL */
} __packed;
#define CARL9170FW_MOTD_DESC_SIZE			\
	(sizeof(struct carl9170fw_motd_desc))

#define CARL9170FW_FIX_DESC_MIN_VER			1
#define CARL9170FW_FIX_DESC_CUR_VER			2
/* one EEPROM override: word at @address becomes (word & mask) | value */
struct carl9170fw_fix_entry {
	__le32 address;
	__le32 mask;
	__le32 value;
} __packed;

/* variable-length list of EEPROM overrides */
struct carl9170fw_fix_desc {
	struct carl9170fw_desc_head head;
	struct carl9170fw_fix_entry data[0];
} __packed;
#define CARL9170FW_FIX_DESC_SIZE			\
	(sizeof(struct carl9170fw_fix_desc))

#define CARL9170FW_DBG_DESC_MIN_VER			1
#define CARL9170FW_DBG_DESC_CUR_VER			3
/* addresses of firmware-internal debug counters */
struct carl9170fw_dbg_desc {
	struct carl9170fw_desc_head head;

	__le32 bogoclock_addr;
	__le32 counter_addr;
	__le32 rx_total_addr;
	__le32 rx_overrun_addr;
	__le32 rx_filter;

	/* Put your debugging definitions here */
} __packed;
#define CARL9170FW_DBG_DESC_SIZE			\
	(sizeof(struct carl9170fw_dbg_desc))

#define CARL9170FW_CHK_DESC_MIN_VER			1
#define CARL9170FW_CHK_DESC_CUR_VER			2
/* CRC32 checksums protecting the image and the descriptor area */
struct carl9170fw_chk_desc {
	struct carl9170fw_desc_head head;
	__le32 fw_crc32;
	__le32 hdr_crc32;
} __packed;
#define CARL9170FW_CHK_DESC_SIZE			\
	(sizeof(struct carl9170fw_chk_desc))
172
173#define CARL9170FW_LAST_DESC_MIN_VER 1
174#define CARL9170FW_LAST_DESC_CUR_VER 2
175struct carl9170fw_last_desc {
176 struct carl9170fw_desc_head head;
177} __packed;
178#define CARL9170FW_LAST_DESC_SIZE \
179 (sizeof(struct carl9170fw_fix_desc))
180
/* upper bound for the whole descriptor chain */
#define CARL9170FW_DESC_MAX_LENGTH			8192

/* static initializer for an embedded desc_head member */
#define CARL9170FW_FILL_DESC(_magic, _length, _min_ver, _cur_ver)	\
	.head = {							\
		.magic = _magic,					\
		.length = cpu_to_le16(_length),				\
		.min_ver = _min_ver,					\
		.cur_ver = _cur_ver,					\
	}
190
191static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
192 u8 magic[4], __le16 length,
193 u8 min_ver, u8 cur_ver)
194{
195 head->magic[0] = magic[0];
196 head->magic[1] = magic[1];
197 head->magic[2] = magic[2];
198 head->magic[3] = magic[3];
199
200 head->length = length;
201 head->min_ver = min_ver;
202 head->cur_ver = cur_ver;
203}
204
/*
 * Iterate over a descriptor chain, stopping at (but not visiting)
 * the terminating LAST descriptor or on a bogus length field.
 */
#define carl9170fw_for_each_hdr(desc, fw_desc)			\
	for (desc = fw_desc;					\
	     memcmp(desc->magic, LAST_MAGIC, 4) &&		\
	     le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE &&	\
	     le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH;	\
	     desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
211
/*
 * True when @_min_ver lies outside the [min_ver, cur_ver] range of
 * @head, i.e. the descriptor revision is incompatible.
 * (Removed a stray trailing line continuation that made the macro
 * swallow the following blank line.)
 */
#define CHECK_HDR_VERSION(head, _min_ver)			\
	(((head)->cur_ver < _min_ver) || ((head)->min_ver > _min_ver))
214
215static inline bool carl9170fw_supports(__le32 list, u8 feature)
216{
217 return le32_to_cpu(list) & BIT(feature);
218}
219
/*
 * Match a descriptor against @descid. A match additionally requires
 * that @compatible_revision lies within the descriptor's
 * [min_ver, cur_ver] range and that it is at least @min_len bytes.
 */
static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
				       const u8 descid[4], u16 min_len,
				       u8 compatible_revision)
{
	if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
	    descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
	    !CHECK_HDR_VERSION(head, compatible_revision) &&
	    (le16_to_cpu(head->length) >= min_len))
		return true;

	return false;
}
232
/* plausible firmware image sizes, inclusive bounds */
#define CARL9170FW_MIN_SIZE	32
#define CARL9170FW_MAX_SIZE	16384

/* Returns true when @len is a plausible firmware image size. */
static inline bool carl9170fw_size_check(unsigned int len)
{
	return !(len > CARL9170FW_MAX_SIZE || len < CARL9170FW_MIN_SIZE);
}
240
241#endif /* __CARL9170_SHARED_FWDESC_H */
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
new file mode 100644
index 000000000000..2f471b3f05af
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -0,0 +1,739 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * Register map, hardware-specific definitions
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#ifndef __CARL9170_SHARED_HW_H
40#define __CARL9170_SHARED_HW_H
41
42/* High Speed UART */
43#define AR9170_UART_REG_BASE 0x1c0000
44
45/* Definitions of interrupt registers */
46#define AR9170_UART_REG_RX_BUFFER (AR9170_UART_REG_BASE + 0x000)
47#define AR9170_UART_REG_TX_HOLDING (AR9170_UART_REG_BASE + 0x004)
48#define AR9170_UART_REG_FIFO_CONTROL (AR9170_UART_REG_BASE + 0x010)
49#define AR9170_UART_FIFO_CTRL_RESET_RX_FIFO 0x02
50#define AR9170_UART_FIFO_CTRL_RESET_TX_FIFO 0x04
51
52#define AR9170_UART_REG_LINE_CONTROL (AR9170_UART_REG_BASE + 0x014)
53#define AR9170_UART_REG_MODEM_CONTROL (AR9170_UART_REG_BASE + 0x018)
54#define AR9170_UART_MODEM_CTRL_DTR_BIT 0x01
55#define AR9170_UART_MODEM_CTRL_RTS_BIT 0x02
56#define AR9170_UART_MODEM_CTRL_INTERNAL_LOOP_BACK 0x10
57#define AR9170_UART_MODEM_CTRL_AUTO_RTS 0x20
58#define AR9170_UART_MODEM_CTRL_AUTO_CTR 0x40
59
60#define AR9170_UART_REG_LINE_STATUS (AR9170_UART_REG_BASE + 0x01c)
61#define AR9170_UART_LINE_STS_RX_DATA_READY 0x01
62#define AR9170_UART_LINE_STS_RX_BUFFER_OVERRUN 0x02
63#define AR9170_UART_LINE_STS_RX_BREAK_IND 0x10
64#define AR9170_UART_LINE_STS_TX_FIFO_NEAR_EMPTY 0x20
65#define AR9170_UART_LINE_STS_TRANSMITTER_EMPTY 0x40
66
67#define AR9170_UART_REG_MODEM_STATUS (AR9170_UART_REG_BASE + 0x020)
68#define AR9170_UART_MODEM_STS_CTS_CHANGE 0x01
69#define AR9170_UART_MODEM_STS_DSR_CHANGE 0x02
70#define AR9170_UART_MODEM_STS_DCD_CHANGE 0x08
71#define AR9170_UART_MODEM_STS_CTS_COMPL 0x10
72#define AR9170_UART_MODEM_STS_DSR_COMPL 0x20
73#define AR9170_UART_MODEM_STS_DCD_COMPL 0x80
74
75#define AR9170_UART_REG_SCRATCH (AR9170_UART_REG_BASE + 0x024)
76#define AR9170_UART_REG_DIVISOR_LSB (AR9170_UART_REG_BASE + 0x028)
77#define AR9170_UART_REG_DIVISOR_MSB (AR9170_UART_REG_BASE + 0x02c)
78#define AR9170_UART_REG_WORD_RX_BUFFER (AR9170_UART_REG_BASE + 0x034)
79#define AR9170_UART_REG_WORD_TX_HOLDING (AR9170_UART_REG_BASE + 0x038)
80#define AR9170_UART_REG_FIFO_COUNT (AR9170_UART_REG_BASE + 0x03c)
81#define AR9170_UART_REG_REMAINDER (AR9170_UART_REG_BASE + 0x04c)
82
83/* Timer */
84#define AR9170_TIMER_REG_BASE 0x1c1000
85
86#define AR9170_TIMER_REG_WATCH_DOG (AR9170_TIMER_REG_BASE + 0x000)
87#define AR9170_TIMER_REG_TIMER0 (AR9170_TIMER_REG_BASE + 0x010)
88#define AR9170_TIMER_REG_TIMER1 (AR9170_TIMER_REG_BASE + 0x014)
89#define AR9170_TIMER_REG_TIMER2 (AR9170_TIMER_REG_BASE + 0x018)
90#define AR9170_TIMER_REG_TIMER3 (AR9170_TIMER_REG_BASE + 0x01c)
91#define AR9170_TIMER_REG_TIMER4 (AR9170_TIMER_REG_BASE + 0x020)
92#define AR9170_TIMER_REG_CONTROL (AR9170_TIMER_REG_BASE + 0x024)
93#define AR9170_TIMER_CTRL_DISABLE_CLOCK 0x100
94
95#define AR9170_TIMER_REG_INTERRUPT (AR9170_TIMER_REG_BASE + 0x028)
96#define AR9170_TIMER_INT_TIMER0 0x001
97#define AR9170_TIMER_INT_TIMER1 0x002
98#define AR9170_TIMER_INT_TIMER2 0x004
99#define AR9170_TIMER_INT_TIMER3 0x008
100#define AR9170_TIMER_INT_TIMER4 0x010
101#define AR9170_TIMER_INT_TICK_TIMER 0x100
102
103#define AR9170_TIMER_REG_TICK_TIMER (AR9170_TIMER_REG_BASE + 0x030)
104#define AR9170_TIMER_REG_CLOCK_LOW (AR9170_TIMER_REG_BASE + 0x040)
105#define AR9170_TIMER_REG_CLOCK_HIGH (AR9170_TIMER_REG_BASE + 0x044)
106
107#define AR9170_MAC_REG_BASE 0x1c3000
108
109#define AR9170_MAC_REG_POWER_STATE_CTRL (AR9170_MAC_REG_BASE + 0x500)
110#define AR9170_MAC_POWER_STATE_CTRL_RESET 0x20
111
112#define AR9170_MAC_REG_MAC_POWER_STATE_CTRL (AR9170_MAC_REG_BASE + 0x50c)
113
114#define AR9170_MAC_REG_INT_CTRL (AR9170_MAC_REG_BASE + 0x510)
115#define AR9170_MAC_INT_TXC BIT(0)
116#define AR9170_MAC_INT_RXC BIT(1)
117#define AR9170_MAC_INT_RETRY_FAIL BIT(2)
118#define AR9170_MAC_INT_WAKEUP BIT(3)
119#define AR9170_MAC_INT_ATIM BIT(4)
120#define AR9170_MAC_INT_DTIM BIT(5)
121#define AR9170_MAC_INT_CFG_BCN BIT(6)
122#define AR9170_MAC_INT_ABORT BIT(7)
123#define AR9170_MAC_INT_QOS BIT(8)
124#define AR9170_MAC_INT_MIMO_PS BIT(9)
125#define AR9170_MAC_INT_KEY_GEN BIT(10)
126#define AR9170_MAC_INT_DECRY_NOUSER BIT(11)
127#define AR9170_MAC_INT_RADAR BIT(12)
128#define AR9170_MAC_INT_QUIET_FRAME BIT(13)
129#define AR9170_MAC_INT_PRETBTT BIT(14)
130
131#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
132#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
133
134#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51c)
135#define AR9170_MAC_ATIM_PERIOD_S 0
136#define AR9170_MAC_ATIM_PERIOD 0x0000ffff
137
138#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
139#define AR9170_MAC_BCN_PERIOD_S 0
140#define AR9170_MAC_BCN_PERIOD 0x0000ffff
141#define AR9170_MAC_BCN_DTIM_S 16
142#define AR9170_MAC_BCN_DTIM 0x00ff0000
143#define AR9170_MAC_BCN_AP_MODE BIT(24)
144#define AR9170_MAC_BCN_IBSS_MODE BIT(25)
145#define AR9170_MAC_BCN_PWR_MGT BIT(26)
146#define AR9170_MAC_BCN_STA_PS BIT(27)
147
148#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
149#define AR9170_MAC_PRETBTT_S 0
150#define AR9170_MAC_PRETBTT 0x0000ffff
151#define AR9170_MAC_PRETBTT2_S 16
152#define AR9170_MAC_PRETBTT2 0xffff0000
153
154#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
155#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
156#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
157#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
158
159#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
160#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
161
162#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62c)
163
164#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
165#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
166#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
167#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
168#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
169#define AR9170_MAC_REG_AFTER_PNP (AR9170_MAC_REG_BASE + 0x648)
170#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64c)
171
172#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
173#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
174#define AR9170_MAC_SNIFFER_ENABLE_PROMISC BIT(0)
175#define AR9170_MAC_SNIFFER_DEFAULTS 0x02000000
176#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
177#define AR9170_MAC_ENCRYPTION_RX_SOFTWARE BIT(3)
178#define AR9170_MAC_ENCRYPTION_DEFAULTS 0x70
179
180#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
181#define AR9170_MAC_REG_MISC_684 (AR9170_MAC_REG_BASE + 0x684)
182#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
183
184#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
185#define AR9170_MAC_FTF_ASSOC_REQ BIT(0)
186#define AR9170_MAC_FTF_ASSOC_RESP BIT(1)
187#define AR9170_MAC_FTF_REASSOC_REQ BIT(2)
188#define AR9170_MAC_FTF_REASSOC_RESP BIT(3)
189#define AR9170_MAC_FTF_PRB_REQ BIT(4)
190#define AR9170_MAC_FTF_PRB_RESP BIT(5)
191#define AR9170_MAC_FTF_BIT6 BIT(6)
192#define AR9170_MAC_FTF_BIT7 BIT(7)
193#define AR9170_MAC_FTF_BEACON BIT(8)
194#define AR9170_MAC_FTF_ATIM BIT(9)
195#define AR9170_MAC_FTF_DEASSOC BIT(10)
196#define AR9170_MAC_FTF_AUTH BIT(11)
197#define AR9170_MAC_FTF_DEAUTH BIT(12)
198#define AR9170_MAC_FTF_BIT13 BIT(13)
199#define AR9170_MAC_FTF_BIT14 BIT(14)
200#define AR9170_MAC_FTF_BIT15 BIT(15)
201#define AR9170_MAC_FTF_BAR BIT(24)
202#define AR9170_MAC_FTF_BA BIT(25)
203#define AR9170_MAC_FTF_PSPOLL BIT(26)
204#define AR9170_MAC_FTF_RTS BIT(27)
205#define AR9170_MAC_FTF_CTS BIT(28)
206#define AR9170_MAC_FTF_ACK BIT(29)
207#define AR9170_MAC_FTF_CFE BIT(30)
208#define AR9170_MAC_FTF_CFE_ACK BIT(31)
209#define AR9170_MAC_FTF_DEFAULTS 0x0500ffff
210#define AR9170_MAC_FTF_MONITOR 0xff00ffff
211
212#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
213#define AR9170_MAC_REG_ACK_TPC (AR9170_MAC_REG_BASE + 0x694)
214#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
215#define AR9170_MAC_REG_RX_TIMEOUT_COUNT (AR9170_MAC_REG_BASE + 0x69c)
216#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6a0)
217#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6a4)
218#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6a8)
219#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6ac)
220#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6b0)
221#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6bc)
222#define AR9170_MAC_REG_TX_BLOCKACKS (AR9170_MAC_REG_BASE + 0x6c0)
223#define AR9170_MAC_REG_NAV_COUNT (AR9170_MAC_REG_BASE + 0x6c4)
224#define AR9170_MAC_REG_BACKOFF_STATUS (AR9170_MAC_REG_BASE + 0x6c8)
225#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6cc)
226
227#define AR9170_MAC_REG_TX_COMPLETE (AR9170_MAC_REG_BASE + 0x6d4)
228
229#define AR9170_MAC_REG_CHANNEL_BUSY (AR9170_MAC_REG_BASE + 0x6e8)
230#define AR9170_MAC_REG_EXT_BUSY (AR9170_MAC_REG_BASE + 0x6ec)
231
232#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6f0)
233#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6f4)
234#define AR9170_MAC_REG_ACK_FC (AR9170_MAC_REG_BASE + 0x6f8)
235
236#define AR9170_MAC_REG_CAM_MODE (AR9170_MAC_REG_BASE + 0x700)
237#define AR9170_MAC_CAM_IBSS 0xe0
238#define AR9170_MAC_CAM_AP 0xa1
239#define AR9170_MAC_CAM_STA 0x2
240#define AR9170_MAC_CAM_AP_WDS 0x3
241#define AR9170_MAC_CAM_DEFAULTS (0xf << 24)
242#define AR9170_MAC_CAM_HOST_PENDING 0x80000000
243
244#define AR9170_MAC_REG_CAM_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
245#define AR9170_MAC_REG_CAM_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
246
247#define AR9170_MAC_REG_CAM_ADDR (AR9170_MAC_REG_BASE + 0x70c)
248#define AR9170_MAC_CAM_ADDR_WRITE 0x80000000
249#define AR9170_MAC_REG_CAM_DATA0 (AR9170_MAC_REG_BASE + 0x720)
250#define AR9170_MAC_REG_CAM_DATA1 (AR9170_MAC_REG_BASE + 0x724)
251#define AR9170_MAC_REG_CAM_DATA2 (AR9170_MAC_REG_BASE + 0x728)
252#define AR9170_MAC_REG_CAM_DATA3 (AR9170_MAC_REG_BASE + 0x72c)
253
254#define AR9170_MAC_REG_CAM_DBG0 (AR9170_MAC_REG_BASE + 0x730)
255#define AR9170_MAC_REG_CAM_DBG1 (AR9170_MAC_REG_BASE + 0x734)
256#define AR9170_MAC_REG_CAM_DBG2 (AR9170_MAC_REG_BASE + 0x738)
257#define AR9170_MAC_REG_CAM_STATE (AR9170_MAC_REG_BASE + 0x73c)
258#define AR9170_MAC_CAM_STATE_READ_PENDING 0x40000000
259#define AR9170_MAC_CAM_STATE_WRITE_PENDING 0x80000000
260
261#define AR9170_MAC_REG_CAM_TXKEY (AR9170_MAC_REG_BASE + 0x740)
262#define AR9170_MAC_REG_CAM_RXKEY (AR9170_MAC_REG_BASE + 0x750)
263
264#define AR9170_MAC_REG_CAM_TX_ENC_TYPE (AR9170_MAC_REG_BASE + 0x760)
265#define AR9170_MAC_REG_CAM_RX_ENC_TYPE (AR9170_MAC_REG_BASE + 0x770)
266#define AR9170_MAC_REG_CAM_TX_SERACH_HIT (AR9170_MAC_REG_BASE + 0x780)
267#define AR9170_MAC_REG_CAM_RX_SERACH_HIT (AR9170_MAC_REG_BASE + 0x790)
268
269#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xb00)
270#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xb04)
271#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xb08)
272#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xb0c)
273#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xb10)
274#define AR9170_MAC_REG_AC2_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xb14)
275#define AR9170_MAC_REG_AC4_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xb18)
276#define AR9170_MAC_REG_TXOP_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0xb1c)
277#define AR9170_MAC_REG_TXOP_ACK_INTERVAL (AR9170_MAC_REG_BASE + 0xb20)
278#define AR9170_MAC_REG_CONTENTION_POINT (AR9170_MAC_REG_BASE + 0xb24)
279#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xb28)
280#define AR9170_MAC_REG_TID_CFACK_CFEND_RATE (AR9170_MAC_REG_BASE + 0xb2c)
281#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xb30)
282#define AR9170_MAC_REG_TKIP_TSC (AR9170_MAC_REG_BASE + 0xb34)
283#define AR9170_MAC_REG_TXOP_DURATION (AR9170_MAC_REG_BASE + 0xb38)
284#define AR9170_MAC_REG_TX_QOS_THRESHOLD (AR9170_MAC_REG_BASE + 0xb3c)
285#define AR9170_MAC_REG_QOS_PRIORITY_VIRTUAL_CCA (AR9170_MAC_REG_BASE + 0xb40)
286#define AR9170_MAC_VIRTUAL_CCA_Q0 BIT(15)
287#define AR9170_MAC_VIRTUAL_CCA_Q1 BIT(16)
288#define AR9170_MAC_VIRTUAL_CCA_Q2 BIT(17)
289#define AR9170_MAC_VIRTUAL_CCA_Q3 BIT(18)
290#define AR9170_MAC_VIRTUAL_CCA_Q4 BIT(19)
291#define AR9170_MAC_VIRTUAL_CCA_ALL (0xf8000)
292
293#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xb44)
294#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xb48)
295
296#define AR9170_MAC_REG_AMPDU_COUNT (AR9170_MAC_REG_BASE + 0xb88)
297#define AR9170_MAC_REG_MPDU_COUNT (AR9170_MAC_REG_BASE + 0xb8c)
298
299#define AR9170_MAC_REG_AMPDU_FACTOR (AR9170_MAC_REG_BASE + 0xb9c)
300#define AR9170_MAC_AMPDU_FACTOR 0x7f0000
301#define AR9170_MAC_AMPDU_FACTOR_S 16
302#define AR9170_MAC_REG_AMPDU_DENSITY (AR9170_MAC_REG_BASE + 0xba0)
303#define AR9170_MAC_AMPDU_DENSITY 0x7
304#define AR9170_MAC_AMPDU_DENSITY_S 0
305
306#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xbb0)
307#define AR9170_MAC_FCS_SWFCS 0x1
308#define AR9170_MAC_FCS_FIFO_PROT 0x4
309
310#define AR9170_MAC_REG_RTS_CTS_TPC (AR9170_MAC_REG_BASE + 0xbb4)
311#define AR9170_MAC_REG_CFEND_QOSNULL_TPC (AR9170_MAC_REG_BASE + 0xbb8)
312
313#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xc00)
314#define AR9170_MAC_REG_RX_CONTROL (AR9170_MAC_REG_BASE + 0xc40)
315#define AR9170_MAC_RX_CTRL_DEAGG 0x1
316#define AR9170_MAC_RX_CTRL_SHORT_FILTER 0x2
317#define AR9170_MAC_RX_CTRL_SA_DA_SEARCH 0x20
318#define AR9170_MAC_RX_CTRL_PASS_TO_HOST BIT(28)
319#define AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER BIT(30)
320
321#define AR9170_MAC_REG_RX_CONTROL_1 (AR9170_MAC_REG_BASE + 0xc44)
322
323#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xc50)
324
325#define AR9170_MAC_REG_RX_MPDU (AR9170_MAC_REG_BASE + 0xca0)
326#define AR9170_MAC_REG_RX_DROPPED_MPDU (AR9170_MAC_REG_BASE + 0xca4)
327#define AR9170_MAC_REG_RX_DEL_MPDU (AR9170_MAC_REG_BASE + 0xca8)
328#define AR9170_MAC_REG_RX_PHY_MISC_ERROR (AR9170_MAC_REG_BASE + 0xcac)
329#define AR9170_MAC_REG_RX_PHY_XR_ERROR (AR9170_MAC_REG_BASE + 0xcb0)
330#define AR9170_MAC_REG_RX_PHY_OFDM_ERROR (AR9170_MAC_REG_BASE + 0xcb4)
331#define AR9170_MAC_REG_RX_PHY_CCK_ERROR (AR9170_MAC_REG_BASE + 0xcb8)
332#define AR9170_MAC_REG_RX_PHY_HT_ERROR (AR9170_MAC_REG_BASE + 0xcbc)
333#define AR9170_MAC_REG_RX_PHY_TOTAL (AR9170_MAC_REG_BASE + 0xcc0)
334
335#define AR9170_MAC_REG_DMA_TXQ_ADDR (AR9170_MAC_REG_BASE + 0xd00)
336#define AR9170_MAC_REG_DMA_TXQ_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd04)
337#define AR9170_MAC_REG_DMA_TXQ0_ADDR (AR9170_MAC_REG_BASE + 0xd00)
338#define AR9170_MAC_REG_DMA_TXQ0_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd04)
339#define AR9170_MAC_REG_DMA_TXQ1_ADDR (AR9170_MAC_REG_BASE + 0xd08)
340#define AR9170_MAC_REG_DMA_TXQ1_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd0c)
341#define AR9170_MAC_REG_DMA_TXQ2_ADDR (AR9170_MAC_REG_BASE + 0xd10)
342#define AR9170_MAC_REG_DMA_TXQ2_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd14)
343#define AR9170_MAC_REG_DMA_TXQ3_ADDR (AR9170_MAC_REG_BASE + 0xd18)
344#define AR9170_MAC_REG_DMA_TXQ3_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd1c)
345#define AR9170_MAC_REG_DMA_TXQ4_ADDR (AR9170_MAC_REG_BASE + 0xd20)
346#define AR9170_MAC_REG_DMA_TXQ4_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd24)
347#define AR9170_MAC_REG_DMA_RXQ_ADDR (AR9170_MAC_REG_BASE + 0xd28)
348#define AR9170_MAC_REG_DMA_RXQ_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd2c)
349
350#define AR9170_MAC_REG_DMA_TRIGGER (AR9170_MAC_REG_BASE + 0xd30)
351#define AR9170_DMA_TRIGGER_TXQ0 BIT(0)
352#define AR9170_DMA_TRIGGER_TXQ1 BIT(1)
353#define AR9170_DMA_TRIGGER_TXQ2 BIT(2)
354#define AR9170_DMA_TRIGGER_TXQ3 BIT(3)
355#define AR9170_DMA_TRIGGER_TXQ4 BIT(4)
356#define AR9170_DMA_TRIGGER_RXQ BIT(8)
357
358#define AR9170_MAC_REG_DMA_WLAN_STATUS (AR9170_MAC_REG_BASE + 0xd38)
359#define AR9170_MAC_REG_DMA_STATUS (AR9170_MAC_REG_BASE + 0xd3c)
360
361#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xd7c)
362#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
363#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
364#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
365#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
366
367#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xd84)
368#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xd88)
369#define AR9170_MAC_BCN_LENGTH_MAX 256
370
371#define AR9170_MAC_REG_BCN_STATUS (AR9170_MAC_REG_BASE + 0xd8c)
372
373#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xd90)
374#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xd94)
375#define AR9170_BCN_CTRL_READY 0x01
376#define AR9170_BCN_CTRL_LOCK 0x02
377
378#define AR9170_MAC_REG_BCN_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd98)
379#define AR9170_MAC_REG_BCN_COUNT (AR9170_MAC_REG_BASE + 0xd9c)
380
381
382#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xda0)
383#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xda4)
384
385#define AR9170_MAC_REG_DMA_TXQX_ADDR_CURR (AR9170_MAC_REG_BASE + 0xdc0)
386
387/* Random number generator */
388#define AR9170_RAND_REG_BASE 0x1d0000
389
390#define AR9170_RAND_REG_NUM (AR9170_RAND_REG_BASE + 0x000)
391#define AR9170_RAND_REG_MODE (AR9170_RAND_REG_BASE + 0x004)
392#define AR9170_RAND_MODE_MANUAL 0x000
393#define AR9170_RAND_MODE_FREE 0x001
394
395/* GPIO */
396#define AR9170_GPIO_REG_BASE 0x1d0100
397#define AR9170_GPIO_REG_PORT_TYPE (AR9170_GPIO_REG_BASE + 0x000)
398#define AR9170_GPIO_REG_PORT_DATA (AR9170_GPIO_REG_BASE + 0x004)
399#define AR9170_GPIO_PORT_LED_0 1
400#define AR9170_GPIO_PORT_LED_1 2
401/* WPS Button GPIO for TP-Link TL-WN821N */
402#define AR9170_GPIO_PORT_WPS_BUTTON_PRESSED 4
403
404/* Memory Controller */
405#define AR9170_MC_REG_BASE 0x1d1000
406
407#define AR9170_MC_REG_FLASH_WAIT_STATE (AR9170_MC_REG_BASE + 0x000)
408#define AR9170_MC_REG_SEEPROM_WP0 (AR9170_MC_REG_BASE + 0x400)
409#define AR9170_MC_REG_SEEPROM_WP1 (AR9170_MC_REG_BASE + 0x404)
410#define AR9170_MC_REG_SEEPROM_WP2 (AR9170_MC_REG_BASE + 0x408)
411
412/* Interrupt Controller */
413#define AR9170_MAX_INT_SRC 9
414#define AR9170_INT_REG_BASE 0x1d2000
415
416#define AR9170_INT_REG_FLAG (AR9170_INT_REG_BASE + 0x000)
417#define AR9170_INT_REG_FIQ_MASK (AR9170_INT_REG_BASE + 0x004)
418#define AR9170_INT_REG_IRQ_MASK (AR9170_INT_REG_BASE + 0x008)
419/* INT_REG_FLAG, INT_REG_FIQ_MASK and INT_REG_IRQ_MASK */
420#define AR9170_INT_FLAG_WLAN 0x001
421#define AR9170_INT_FLAG_PTAB_BIT 0x002
422#define AR9170_INT_FLAG_SE_BIT 0x004
423#define AR9170_INT_FLAG_UART_BIT 0x008
424#define AR9170_INT_FLAG_TIMER_BIT 0x010
425#define AR9170_INT_FLAG_EXT_BIT 0x020
426#define AR9170_INT_FLAG_SW_BIT 0x040
427#define AR9170_INT_FLAG_USB_BIT 0x080
428#define AR9170_INT_FLAG_ETHERNET_BIT 0x100
429
430#define AR9170_INT_REG_PRIORITY1 (AR9170_INT_REG_BASE + 0x00c)
431#define AR9170_INT_REG_PRIORITY2 (AR9170_INT_REG_BASE + 0x010)
432#define AR9170_INT_REG_PRIORITY3 (AR9170_INT_REG_BASE + 0x014)
433#define AR9170_INT_REG_EXT_INT_CONTROL (AR9170_INT_REG_BASE + 0x018)
434#define AR9170_INT_REG_SW_INT_CONTROL (AR9170_INT_REG_BASE + 0x01c)
435#define AR9170_INT_SW_INT_ENABLE 0x1
436
437#define AR9170_INT_REG_FIQ_ENCODE (AR9170_INT_REG_BASE + 0x020)
438#define AR9170_INT_INT_IRQ_ENCODE (AR9170_INT_REG_BASE + 0x024)
439
440/* Power Management */
441#define AR9170_PWR_REG_BASE 0x1d4000
442
443#define AR9170_PWR_REG_POWER_STATE (AR9170_PWR_REG_BASE + 0x000)
444
445#define AR9170_PWR_REG_RESET (AR9170_PWR_REG_BASE + 0x004)
446#define AR9170_PWR_RESET_COMMIT_RESET_MASK BIT(0)
447#define AR9170_PWR_RESET_WLAN_MASK BIT(1)
448#define AR9170_PWR_RESET_DMA_MASK BIT(2)
449#define AR9170_PWR_RESET_BRIDGE_MASK BIT(3)
450#define AR9170_PWR_RESET_AHB_MASK BIT(9)
451#define AR9170_PWR_RESET_BB_WARM_RESET BIT(10)
452#define AR9170_PWR_RESET_BB_COLD_RESET BIT(11)
453#define AR9170_PWR_RESET_ADDA_CLK_COLD_RESET BIT(12)
454#define AR9170_PWR_RESET_PLL BIT(13)
455#define AR9170_PWR_RESET_USB_PLL BIT(14)
456
457#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
458#define AR9170_PWR_CLK_AHB_40MHZ 0
459#define AR9170_PWR_CLK_AHB_20_22MHZ 1
460#define AR9170_PWR_CLK_AHB_40_44MHZ 2
461#define AR9170_PWR_CLK_AHB_80_88MHZ 3
462#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
463
464#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
465#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
466#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
467
468/* Faraday USB Controller */
469#define AR9170_USB_REG_BASE 0x1e1000
470
471#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
472#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
473#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
474#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
475
476#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
477#define AR9170_USB_DEVICE_ADDRESS_CONFIGURE BIT(7)
478
479#define AR9170_USB_REG_TEST (AR9170_USB_REG_BASE + 0x002)
480#define AR9170_USB_REG_PHY_TEST_SELECT (AR9170_USB_REG_BASE + 0x008)
481#define AR9170_USB_REG_CX_CONFIG_STATUS (AR9170_USB_REG_BASE + 0x00b)
482#define AR9170_USB_REG_EP0_DATA (AR9170_USB_REG_BASE + 0x00c)
483#define AR9170_USB_REG_EP0_DATA1 (AR9170_USB_REG_BASE + 0x00c)
484#define AR9170_USB_REG_EP0_DATA2 (AR9170_USB_REG_BASE + 0x00d)
485
486#define AR9170_USB_REG_INTR_MASK_BYTE_0 (AR9170_USB_REG_BASE + 0x011)
487#define AR9170_USB_REG_INTR_MASK_BYTE_1 (AR9170_USB_REG_BASE + 0x012)
488#define AR9170_USB_REG_INTR_MASK_BYTE_2 (AR9170_USB_REG_BASE + 0x013)
489#define AR9170_USB_REG_INTR_MASK_BYTE_3 (AR9170_USB_REG_BASE + 0x014)
490#define AR9170_USB_REG_INTR_MASK_BYTE_4 (AR9170_USB_REG_BASE + 0x015)
491#define AR9170_USB_INTR_DISABLE_OUT_INT (BIT(7) | BIT(6))
492
493#define AR9170_USB_REG_INTR_MASK_BYTE_5 (AR9170_USB_REG_BASE + 0x016)
494#define AR9170_USB_REG_INTR_MASK_BYTE_6 (AR9170_USB_REG_BASE + 0x017)
495#define AR9170_USB_INTR_DISABLE_IN_INT BIT(6)
496
497#define AR9170_USB_REG_INTR_MASK_BYTE_7 (AR9170_USB_REG_BASE + 0x018)
498
499#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
500
501#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
502#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
503#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
504#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
505#define AR9170_USB_REG_INTR_SOURCE_4 (AR9170_USB_REG_BASE + 0x025)
506#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
507#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
508#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
509
510#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
511#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
512#define AR9170_USB_REG_EP2_MAP (AR9170_USB_REG_BASE + 0x031)
513#define AR9170_USB_REG_EP3_MAP (AR9170_USB_REG_BASE + 0x032)
514#define AR9170_USB_REG_EP4_MAP (AR9170_USB_REG_BASE + 0x033)
515#define AR9170_USB_REG_EP5_MAP (AR9170_USB_REG_BASE + 0x034)
516#define AR9170_USB_REG_EP6_MAP (AR9170_USB_REG_BASE + 0x035)
517#define AR9170_USB_REG_EP7_MAP (AR9170_USB_REG_BASE + 0x036)
518#define AR9170_USB_REG_EP8_MAP (AR9170_USB_REG_BASE + 0x037)
519#define AR9170_USB_REG_EP9_MAP (AR9170_USB_REG_BASE + 0x038)
520#define AR9170_USB_REG_EP10_MAP (AR9170_USB_REG_BASE + 0x039)
521
522#define AR9170_USB_REG_EP_IN_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x03f)
523#define AR9170_USB_EP_IN_TOGGLE 0x10
524
525#define AR9170_USB_REG_EP_IN_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x03e)
526
527#define AR9170_USB_REG_EP_OUT_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x05f)
528#define AR9170_USB_EP_OUT_TOGGLE 0x10
529
530#define AR9170_USB_REG_EP_OUT_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x05e)
531
532#define AR9170_USB_REG_EP3_BYTE_COUNT_HIGH (AR9170_USB_REG_BASE + 0x0ae)
533#define AR9170_USB_REG_EP3_BYTE_COUNT_LOW (AR9170_USB_REG_BASE + 0x0be)
534#define AR9170_USB_REG_EP4_BYTE_COUNT_HIGH (AR9170_USB_REG_BASE + 0x0af)
535#define AR9170_USB_REG_EP4_BYTE_COUNT_LOW (AR9170_USB_REG_BASE + 0x0bf)
536
537#define AR9170_USB_REG_FIFO_MAP (AR9170_USB_REG_BASE + 0x080)
538#define AR9170_USB_REG_FIFO0_MAP (AR9170_USB_REG_BASE + 0x080)
539#define AR9170_USB_REG_FIFO1_MAP (AR9170_USB_REG_BASE + 0x081)
540#define AR9170_USB_REG_FIFO2_MAP (AR9170_USB_REG_BASE + 0x082)
541#define AR9170_USB_REG_FIFO3_MAP (AR9170_USB_REG_BASE + 0x083)
542#define AR9170_USB_REG_FIFO4_MAP (AR9170_USB_REG_BASE + 0x084)
543#define AR9170_USB_REG_FIFO5_MAP (AR9170_USB_REG_BASE + 0x085)
544#define AR9170_USB_REG_FIFO6_MAP (AR9170_USB_REG_BASE + 0x086)
545#define AR9170_USB_REG_FIFO7_MAP (AR9170_USB_REG_BASE + 0x087)
546#define AR9170_USB_REG_FIFO8_MAP (AR9170_USB_REG_BASE + 0x088)
547#define AR9170_USB_REG_FIFO9_MAP (AR9170_USB_REG_BASE + 0x089)
548
549#define AR9170_USB_REG_FIFO_CONFIG (AR9170_USB_REG_BASE + 0x090)
550#define AR9170_USB_REG_FIFO0_CONFIG (AR9170_USB_REG_BASE + 0x090)
551#define AR9170_USB_REG_FIFO1_CONFIG (AR9170_USB_REG_BASE + 0x091)
552#define AR9170_USB_REG_FIFO2_CONFIG (AR9170_USB_REG_BASE + 0x092)
553#define AR9170_USB_REG_FIFO3_CONFIG (AR9170_USB_REG_BASE + 0x093)
554#define AR9170_USB_REG_FIFO4_CONFIG (AR9170_USB_REG_BASE + 0x094)
555#define AR9170_USB_REG_FIFO5_CONFIG (AR9170_USB_REG_BASE + 0x095)
556#define AR9170_USB_REG_FIFO6_CONFIG (AR9170_USB_REG_BASE + 0x096)
557#define AR9170_USB_REG_FIFO7_CONFIG (AR9170_USB_REG_BASE + 0x097)
558#define AR9170_USB_REG_FIFO8_CONFIG (AR9170_USB_REG_BASE + 0x098)
559#define AR9170_USB_REG_FIFO9_CONFIG (AR9170_USB_REG_BASE + 0x099)
560
561#define AR9170_USB_REG_EP3_DATA (AR9170_USB_REG_BASE + 0x0f8)
562#define AR9170_USB_REG_EP4_DATA (AR9170_USB_REG_BASE + 0x0fc)
563
564#define AR9170_USB_REG_FIFO_SIZE (AR9170_USB_REG_BASE + 0x100)
565#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
566#define AR9170_USB_DMA_CTL_ENABLE_TO_DEVICE BIT(0)
567#define AR9170_USB_DMA_CTL_ENABLE_FROM_DEVICE BIT(1)
568#define AR9170_USB_DMA_CTL_HIGH_SPEED BIT(2)
569#define AR9170_USB_DMA_CTL_UP_PACKET_MODE BIT(3)
570#define AR9170_USB_DMA_CTL_UP_STREAM_S 4
571#define AR9170_USB_DMA_CTL_UP_STREAM (BIT(4) | BIT(5))
572#define AR9170_USB_DMA_CTL_UP_STREAM_4K (0)
573#define AR9170_USB_DMA_CTL_UP_STREAM_8K BIT(4)
574#define AR9170_USB_DMA_CTL_UP_STREAM_16K BIT(5)
575#define AR9170_USB_DMA_CTL_UP_STREAM_32K (BIT(4) | BIT(5))
576#define AR9170_USB_DMA_CTL_DOWN_STREAM BIT(6)
577
578#define AR9170_USB_REG_DMA_STATUS (AR9170_USB_REG_BASE + 0x10c)
579#define AR9170_USB_DMA_STATUS_UP_IDLE BIT(8)
580#define AR9170_USB_DMA_STATUS_DN_IDLE BIT(16)
581
582#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
583#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
584#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
585#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
586
587/* PCI/USB to AHB Bridge */
588#define AR9170_PTA_REG_BASE 0x1e2000
589
590#define AR9170_PTA_REG_CMD (AR9170_PTA_REG_BASE + 0x000)
591#define AR9170_PTA_REG_PARAM1 (AR9170_PTA_REG_BASE + 0x004)
592#define AR9170_PTA_REG_PARAM2 (AR9170_PTA_REG_BASE + 0x008)
593#define AR9170_PTA_REG_PARAM3 (AR9170_PTA_REG_BASE + 0x00c)
594#define AR9170_PTA_REG_RSP (AR9170_PTA_REG_BASE + 0x010)
595#define AR9170_PTA_REG_STATUS1 (AR9170_PTA_REG_BASE + 0x014)
596#define AR9170_PTA_REG_STATUS2 (AR9170_PTA_REG_BASE + 0x018)
597#define AR9170_PTA_REG_STATUS3 (AR9170_PTA_REG_BASE + 0x01c)
598#define AR9170_PTA_REG_AHB_INT_FLAG (AR9170_PTA_REG_BASE + 0x020)
599#define AR9170_PTA_REG_AHB_INT_MASK (AR9170_PTA_REG_BASE + 0x024)
600#define AR9170_PTA_REG_AHB_INT_ACK (AR9170_PTA_REG_BASE + 0x028)
601#define AR9170_PTA_REG_AHB_SCRATCH1 (AR9170_PTA_REG_BASE + 0x030)
602#define AR9170_PTA_REG_AHB_SCRATCH2 (AR9170_PTA_REG_BASE + 0x034)
603#define AR9170_PTA_REG_AHB_SCRATCH3 (AR9170_PTA_REG_BASE + 0x038)
604#define AR9170_PTA_REG_AHB_SCRATCH4 (AR9170_PTA_REG_BASE + 0x03c)
605
606#define AR9170_PTA_REG_SHARE_MEM_CTRL (AR9170_PTA_REG_BASE + 0x124)
607
608/*
609 * PCI to AHB Bridge
610 */
611
612#define AR9170_PTA_REG_INT_FLAG (AR9170_PTA_REG_BASE + 0x100)
613#define AR9170_PTA_INT_FLAG_DN 0x01
614#define AR9170_PTA_INT_FLAG_UP 0x02
615#define AR9170_PTA_INT_FLAG_CMD 0x04
616
617#define AR9170_PTA_REG_INT_MASK (AR9170_PTA_REG_BASE + 0x104)
618#define AR9170_PTA_REG_DN_DMA_ADDRL (AR9170_PTA_REG_BASE + 0x108)
619#define AR9170_PTA_REG_DN_DMA_ADDRH (AR9170_PTA_REG_BASE + 0x10c)
620#define AR9170_PTA_REG_UP_DMA_ADDRL (AR9170_PTA_REG_BASE + 0x110)
621#define AR9170_PTA_REG_UP_DMA_ADDRH (AR9170_PTA_REG_BASE + 0x114)
622#define AR9170_PTA_REG_DN_PEND_TIME (AR9170_PTA_REG_BASE + 0x118)
623#define AR9170_PTA_REG_UP_PEND_TIME (AR9170_PTA_REG_BASE + 0x11c)
624#define AR9170_PTA_REG_CONTROL (AR9170_PTA_REG_BASE + 0x120)
625#define AR9170_PTA_CTRL_4_BEAT_BURST 0x00
626#define AR9170_PTA_CTRL_8_BEAT_BURST 0x01
627#define AR9170_PTA_CTRL_16_BEAT_BURST 0x02
628#define AR9170_PTA_CTRL_LOOPBACK_MODE 0x10
629
630#define AR9170_PTA_REG_MEM_CTRL (AR9170_PTA_REG_BASE + 0x124)
631#define AR9170_PTA_REG_MEM_ADDR (AR9170_PTA_REG_BASE + 0x128)
632#define AR9170_PTA_REG_DN_DMA_TRIGGER (AR9170_PTA_REG_BASE + 0x12c)
633#define AR9170_PTA_REG_UP_DMA_TRIGGER (AR9170_PTA_REG_BASE + 0x130)
634#define AR9170_PTA_REG_DMA_STATUS (AR9170_PTA_REG_BASE + 0x134)
635#define AR9170_PTA_REG_DN_CURR_ADDRL (AR9170_PTA_REG_BASE + 0x138)
636#define AR9170_PTA_REG_DN_CURR_ADDRH (AR9170_PTA_REG_BASE + 0x13c)
637#define AR9170_PTA_REG_UP_CURR_ADDRL (AR9170_PTA_REG_BASE + 0x140)
638#define AR9170_PTA_REG_UP_CURR_ADDRH (AR9170_PTA_REG_BASE + 0x144)
639#define AR9170_PTA_REG_DMA_MODE_CTRL (AR9170_PTA_REG_BASE + 0x148)
640#define AR9170_PTA_DMA_MODE_CTRL_RESET BIT(0)
641#define AR9170_PTA_DMA_MODE_CTRL_DISABLE_USB BIT(1)
642
643/* Protocol Controller Module */
644#define AR9170_MAC_REG_PC_REG_BASE (AR9170_MAC_REG_BASE + 0xe00)
645
646
647#define AR9170_NUM_LEDS 2
648
649/* CAM */
650#define AR9170_CAM_MAX_USER 64
651#define AR9170_CAM_MAX_KEY_LENGTH 16
652
653#define AR9170_SRAM_OFFSET 0x100000
654#define AR9170_SRAM_SIZE 0x18000
655
656#define AR9170_PRAM_OFFSET 0x200000
657#define AR9170_PRAM_SIZE 0x8000
658
/*
 * AHB clock selection; the numeric values match the
 * AR9170_PWR_CLK_AHB_* settings of AR9170_PWR_REG_CLOCK_SEL.
 * NOTE(review): the GMODE_/AMODE_ pairs sharing a value presumably
 * reflect that the same divider setting yields a different rate in
 * 802.11g (2.4 GHz) vs. 802.11a (5 GHz) operation — confirm against
 * the firmware clock setup.
 */
enum cpu_clock {
	AHB_STATIC_40MHZ = 0,
	AHB_GMODE_22MHZ = 1,
	AHB_AMODE_20MHZ = 1,
	AHB_GMODE_44MHZ = 2,
	AHB_AMODE_40MHZ = 2,
	AHB_GMODE_88MHZ = 3,
	AHB_AMODE_80MHZ = 3
};
668
669/* USB endpoints */
/* USB endpoints */
enum ar9170_usb_ep {
	/*
	 * Control EP is always EP 0 (USB SPEC)
	 *
	 * The weird thing is: the original firmware has a few
	 * comments that suggest that the actual EP numbers
	 * are in the 1 to 10 range?!
	 */
	AR9170_USB_EP_CTRL = 0,

	AR9170_USB_EP_TX,		/* frame transmit */
	AR9170_USB_EP_RX,		/* frame receive */
	AR9170_USB_EP_IRQ,		/* firmware interrupt/event channel */
	AR9170_USB_EP_CMD,		/* command channel */
	AR9170_USB_NUM_EXTRA_EP = 4,	/* endpoints besides the control EP */

	__AR9170_USB_NUM_EP,		/* total endpoints used by the driver */

	__AR9170_USB_NUM_MAX_EP = 10	/* hardware maximum */
};
690
/* USB FIFOs — the controller has ten (see AR9170_USB_REG_FIFO*_MAP). */
enum ar9170_usb_fifo {
	__AR9170_USB_NUM_MAX_FIFO = 10
};
694
/* Hardware TX DMA queues (cf. AR9170_MAC_REG_DMA_TXQ0..4_ADDR). */
enum ar9170_tx_queues {
	AR9170_TXQ0 = 0,
	AR9170_TXQ1,
	AR9170_TXQ2,
	AR9170_TXQ3,
	AR9170_TXQ_SPECIAL,

	/* keep last */
	__AR9170_NUM_TX_QUEUES = 5
};
705
706#define AR9170_TX_STREAM_TAG 0x697e
707#define AR9170_RX_STREAM_TAG 0x4e00
708#define AR9170_RX_STREAM_MAX_SIZE 0xffff
709
710struct ar9170_stream {
711 __le16 length;
712 __le16 tag;
713
714 u8 payload[0];
715};
716
717#define AR9170_MAX_ACKTABLE_ENTRIES 8
718#define AR9170_MAX_VIRTUAL_MAC 7
719
720#define AR9170_USB_EP_CTRL_MAX 64
721#define AR9170_USB_EP_TX_MAX 512
722#define AR9170_USB_EP_RX_MAX 512
723#define AR9170_USB_EP_IRQ_MAX 64
724#define AR9170_USB_EP_CMD_MAX 64
725
726/* Trigger PRETBTT interrupt 6 Kus earlier */
727#define CARL9170_PRETBTT_KUS 6
728
729#define AR5416_MAX_RATE_POWER 63
730
/*
 * Register field helpers: a field "reg" is described by a pair of
 * macros — a mask (reg) and a shift (reg_S).  "reg" must be passed as
 * a plain macro identifier because of the reg##_S token paste; the
 * mask uses of "reg" are now parenthesized for macro hygiene.
 */

/* Replace field "reg" inside lvalue "value" in place. */
#define SET_VAL(reg, value, newvalue) \
	(value = ((value) & ~(reg)) | (((newvalue) << reg##_S) & (reg)))

/* Build the shifted+masked field constant for "reg" from "newvalue". */
#define SET_CONSTVAL(reg, newvalue) \
	(((newvalue) << reg##_S) & (reg))

/* Return a copy of "value" with field "reg" replaced by "newvalue". */
#define MOD_VAL(reg, value, newvalue) \
	(((value) & ~(reg)) | (((newvalue) << reg##_S) & (reg)))
739#endif /* __CARL9170_SHARED_HW_H */
diff --git a/drivers/net/wireless/ath/carl9170/led.c b/drivers/net/wireless/ath/carl9170/led.c
new file mode 100644
index 000000000000..4bb2cbd8bd9b
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/led.c
@@ -0,0 +1,190 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * LED handling
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparer <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include "carl9170.h"
41#include "cmd.h"
42
/* Write the raw GPIO data register; bit n drives LED n. */
int carl9170_led_set_state(struct ar9170 *ar, const u32 led_state)
{
	return carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_DATA, led_state);
}
47
48int carl9170_led_init(struct ar9170 *ar)
49{
50 int err;
51
52 /* disable LEDs */
53 /* GPIO [0/1 mode: output, 2/3: input] */
54 err = carl9170_write_reg(ar, AR9170_GPIO_REG_PORT_TYPE, 3);
55 if (err)
56 goto out;
57
58 /* GPIO 0/1 value: off */
59 err = carl9170_led_set_state(ar, 0);
60
61out:
62 return err;
63}
64
65#ifdef CONFIG_CARL9170_LEDS
/*
 * Delayed work: computes the combined GPIO word for all registered
 * LEDs and writes it to the device. While any LED is lit or has
 * pending toggles, the work re-queues itself; the delay shrinks as
 * the toggle count grows, which makes busy LEDs blink faster.
 */
static void carl9170_led_update(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, led_work.work);
	int i, tmp = 300, blink_delay = 1000;
	u32 led_val = 0;
	bool rerun = false;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);
	for (i = 0; i < AR9170_NUM_LEDS; i++) {
		if (ar->leds[i].registered) {
			if (ar->leds[i].last_state ||
			    ar->leds[i].toggled) {

				/* more toggles -> shorter blink period */
				if (ar->leds[i].toggled)
					tmp = 70 + 200 / (ar->leds[i].toggled);

				if (tmp < blink_delay)
					blink_delay = tmp;

				led_val |= 1 << i;
				ar->leds[i].toggled = 0;
				rerun = true;
			}
		}
	}

	carl9170_led_set_state(ar, led_val);
	mutex_unlock(&ar->mutex);

	if (!rerun)
		return;

	ieee80211_queue_delayed_work(ar->hw,
				     &ar->led_work,
				     msecs_to_jiffies(blink_delay));
}
105
/*
 * led_classdev brightness callback. Records on/off transitions in
 * ->toggled (carl9170_led_update derives the blink rate from that
 * count) and kicks the update worker when the device accepts commands.
 */
static void carl9170_led_set_brightness(struct led_classdev *led,
					enum led_brightness brightness)
{
	struct carl9170_led *arl = container_of(led, struct carl9170_led, l);
	struct ar9170 *ar = arl->ar;

	if (!arl->registered)
		return;

	/* count only actual state changes, not repeated sets */
	if (arl->last_state != !!brightness) {
		arl->toggled++;
		arl->last_state = !!brightness;
	}

	if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
		ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10);
}
123
124static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
125 char *trigger)
126{
127 int err;
128
129 snprintf(ar->leds[i].name, sizeof(ar->leds[i].name),
130 "carl9170-%s::%s", wiphy_name(ar->hw->wiphy), name);
131
132 ar->leds[i].ar = ar;
133 ar->leds[i].l.name = ar->leds[i].name;
134 ar->leds[i].l.brightness_set = carl9170_led_set_brightness;
135 ar->leds[i].l.brightness = 0;
136 ar->leds[i].l.default_trigger = trigger;
137
138 err = led_classdev_register(wiphy_dev(ar->hw->wiphy),
139 &ar->leds[i].l);
140 if (err) {
141 wiphy_err(ar->hw->wiphy, "failed to register %s LED (%d).\n",
142 ar->leds[i].name, err);
143 } else {
144 ar->leds[i].registered = true;
145 }
146
147 return err;
148}
149
150void carl9170_led_unregister(struct ar9170 *ar)
151{
152 int i;
153
154 for (i = 0; i < AR9170_NUM_LEDS; i++)
155 if (ar->leds[i].registered) {
156 led_classdev_unregister(&ar->leds[i].l);
157 ar->leds[i].registered = false;
158 ar->leds[i].toggled = 0;
159 }
160
161 cancel_delayed_work_sync(&ar->led_work);
162}
163
/*
 * Register the driver's LEDs: always the "tx" LED, plus an "assoc"
 * LED unless the device only has a single LED (CARL9170_ONE_LED).
 * On any failure everything registered so far is torn down again.
 */
int carl9170_led_register(struct ar9170 *ar)
{
	int err;

	INIT_DELAYED_WORK(&ar->led_work, carl9170_led_update);

	err = carl9170_led_register_led(ar, 0, "tx",
					ieee80211_get_tx_led_name(ar->hw));
	if (err)
		goto fail;

	/* single-LED hardware only gets the tx LED */
	if (ar->features & CARL9170_ONE_LED)
		return 0;

	err = carl9170_led_register_led(ar, 1, "assoc",
					ieee80211_get_assoc_led_name(ar->hw));
	if (err)
		goto fail;

	return 0;

fail:
	carl9170_led_unregister(ar);
	return err;
}
189
190#endif /* CONFIG_CARL9170_LEDS */
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
new file mode 100644
index 000000000000..2305bc27151c
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -0,0 +1,604 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * MAC programming
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <asm/unaligned.h>
40
41#include "carl9170.h"
42#include "cmd.h"
43
44int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
45{
46 u32 val;
47
48 if (conf_is_ht40(&ar->hw->conf))
49 val = 0x010a;
50 else {
51 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
52 val = 0x105;
53 else
54 val = 0x104;
55 }
56
57 return carl9170_write_reg(ar, AR9170_MAC_REG_DYNAMIC_SIFS_ACK, val);
58}
59
60int carl9170_set_rts_cts_rate(struct ar9170 *ar)
61{
62 u32 rts_rate, cts_rate;
63
64 if (conf_is_ht(&ar->hw->conf)) {
65 /* 12 mbit OFDM */
66 rts_rate = 0x1da;
67 cts_rate = 0x10a;
68 } else {
69 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
70 /* 11 mbit CCK */
71 rts_rate = 033;
72 cts_rate = 003;
73 } else {
74 /* 6 mbit OFDM */
75 rts_rate = 0x1bb;
76 cts_rate = 0x10b;
77 }
78 }
79
80 return carl9170_write_reg(ar, AR9170_MAC_REG_RTS_CTS_RATE,
81 rts_rate | (cts_rate) << 16);
82}
83
/*
 * Program the MAC slot time: 9 us on 5 GHz or short-slot BSSes,
 * the long 20 us slot otherwise. A missing main vif is not an
 * error -- there is simply nothing to program yet.
 */
int carl9170_set_slot_time(struct ar9170 *ar)
{
	struct ieee80211_vif *vif;
	u32 slottime = 20;	/* long slot, in microseconds */

	rcu_read_lock();
	vif = carl9170_get_main_vif(ar);
	if (!vif) {
		rcu_read_unlock();
		return 0;
	}

	if ((ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ) ||
	    vif->bss_conf.use_short_slot)
		slottime = 9;

	rcu_read_unlock();

	/* the register takes the value in its upper bit field (<< 10) */
	return carl9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
				  slottime << 10);
}
105
/*
 * Program the basic and mandatory rate bitmaps. The basic-rate mask
 * from mac80211 is split: CCK bits stay in the low nibble, OFDM bits
 * are shifted up by 4 to match the hardware's register layout.
 */
int carl9170_set_mac_rates(struct ar9170 *ar)
{
	struct ieee80211_vif *vif;
	u32 basic, mandatory;

	rcu_read_lock();
	vif = carl9170_get_main_vif(ar);

	if (!vif) {
		rcu_read_unlock();
		return 0;
	}

	basic = (vif->bss_conf.basic_rates & 0xf);
	basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
	rcu_read_unlock();

	if (ar->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
		mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
	else
		mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */

	carl9170_regwrite_begin(ar);
	carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, basic);
	carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, mandatory);
	carl9170_regwrite_finish();

	return carl9170_regwrite_result();
}
135
/*
 * Upload the current EDCA (QoS) parameter set. Each AC register packs
 * cw_min in the low half and cw_max in the high half. The AIFS values
 * are converted to microseconds (aifs * slot(9us) + SIFS+offset(10))
 * and packed 12 bits apart across two registers; AC2's value straddles
 * the register boundary, hence the ">> 8" continuation below.
 */
int carl9170_set_qos(struct ar9170 *ar)
{
	carl9170_regwrite_begin(ar);

	carl9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
			  (ar->edcf[0].cw_max << 16));
	carl9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
			  (ar->edcf[1].cw_max << 16));
	carl9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
			  (ar->edcf[2].cw_max << 16));
	carl9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
			  (ar->edcf[3].cw_max << 16));
	carl9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
			  (ar->edcf[4].cw_max << 16));

	carl9170_regwrite(AR9170_MAC_REG_AC2_AC1_AC0_AIFS,
			  ((ar->edcf[0].aifs * 9 + 10)) |
			  ((ar->edcf[1].aifs * 9 + 10) << 12) |
			  ((ar->edcf[2].aifs * 9 + 10) << 24));
	carl9170_regwrite(AR9170_MAC_REG_AC4_AC3_AC2_AIFS,
			  ((ar->edcf[2].aifs * 9 + 10) >> 8) |
			  ((ar->edcf[3].aifs * 9 + 10) << 4) |
			  ((ar->edcf[4].aifs * 9 + 10) << 16));

	/* TXOP limits, two ACs per register */
	carl9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
			  ar->edcf[0].txop | ar->edcf[1].txop << 16);
	carl9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
			  ar->edcf[2].txop | ar->edcf[3].txop << 16 |
			  ar->edcf[4].txop << 24);

	carl9170_regwrite_finish();

	return carl9170_regwrite_result();
}
170
/*
 * One-time MAC bring-up: puts the MAC into the OTUS host interface
 * mode and programs the long list of defaults (filters, timings,
 * rates, aggregation parameters). The exact values and their order
 * were taken from the vendor driver/firmware; do not reorder.
 */
int carl9170_init_mac(struct ar9170 *ar)
{
	carl9170_regwrite_begin(ar);

	/* switch MAC to OTUS interface */
	carl9170_regwrite(0x1c3600, 0x3);

	carl9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);

	carl9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0x0);

	carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
			  AR9170_MAC_FTF_MONITOR);

	/* enable MMIC */
	carl9170_regwrite(AR9170_MAC_REG_SNIFFER,
			  AR9170_MAC_SNIFFER_DEFAULTS);

	carl9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);

	carl9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
	carl9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
	carl9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);

	/* CF-END & CF-ACK rate => 24M OFDM */
	carl9170_regwrite(AR9170_MAC_REG_TID_CFACK_CFEND_RATE, 0x59900000);

	/* NAV protects ACK only (in TXOP) */
	carl9170_regwrite(AR9170_MAC_REG_TXOP_DURATION, 0x201);

	/* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
	/* OTUS set AM to 0x1 */
	carl9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);

	carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);

	/* Aggregation MAX number and timeout */
	carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0xa);
	carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a00);

	carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
			  AR9170_MAC_FTF_DEFAULTS);

	carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL,
			  AR9170_MAC_RX_CTRL_DEAGG |
			  AR9170_MAC_RX_CTRL_SHORT_FILTER);

	/* rate sets */
	carl9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
	carl9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
	carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x0030033);

	/* MIMO response control */
	carl9170_regwrite(AR9170_MAC_REG_ACK_TPC, 0x4003c1e);

	carl9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);

	/* set PHY register read timeout (??) */
	carl9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);

	/* Disable Rx TimeOut, workaround for BB. */
	carl9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);

	/* Set WLAN DMA interrupt mode: generate int per packet */
	carl9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);

	carl9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
			  AR9170_MAC_FCS_FIFO_PROT);

	/* Disables the CF_END frame, undocumented register */
	carl9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
			  0x141e0f48);

	/* reset group hash table */
	carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, 0xffffffff);
	carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, 0xffffffff);

	/* disable PRETBTT interrupt */
	carl9170_regwrite(AR9170_MAC_REG_PRETBTT, 0x0);
	carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, 0x0);

	carl9170_regwrite_finish();

	return carl9170_regwrite_result();
}
256
/*
 * Write a 6-byte MAC address into a register pair at "reg":
 * the first 4 bytes go into reg, the last 2 into reg + 4.
 * A NULL mac clears the registers (all-zero address).
 */
static int carl9170_set_mac_reg(struct ar9170 *ar,
				const u32 reg, const u8 *mac)
{
	static const u8 zero[ETH_ALEN] = { 0 };

	if (!mac)
		mac = zero;

	carl9170_regwrite_begin(ar);

	carl9170_regwrite(reg, get_unaligned_le32(mac));
	carl9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));

	carl9170_regwrite_finish();

	return carl9170_regwrite_result();
}
274
/*
 * Install (or clear, mac == NULL) the MAC address for virtual
 * interface slot "id" in the hardware ACK table.
 *
 * NOTE(review): the table indexing uses (id - 1) -- slot 0 appears to
 * be the main interface (programmed via AR9170_MAC_REG_MAC_ADDR_L in
 * carl9170_set_operating_mode), so callers are presumably expected to
 * pass id >= 1 here; id == 0 would address memory before the table.
 * Confirm against the callers.
 */
int carl9170_mod_virtual_mac(struct ar9170 *ar, const unsigned int id,
			     const u8 *mac)
{
	if (WARN_ON(id >= ar->fw.vif_num))
		return -EINVAL;

	return carl9170_set_mac_reg(ar,
		AR9170_MAC_REG_ACK_TABLE + (id - 1) * 8, mac);
}
284
285int carl9170_update_multicast(struct ar9170 *ar, const u64 mc_hash)
286{
287 int err;
288
289 carl9170_regwrite_begin(ar);
290 carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H, mc_hash >> 32);
291 carl9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L, mc_hash);
292 carl9170_regwrite_finish();
293 err = carl9170_regwrite_result();
294 if (err)
295 return err;
296
297 ar->cur_mc_hash = mc_hash;
298 return 0;
299}
300
/*
 * Reprogram the MAC for the current main interface: CAM mode,
 * rx-control, sniffer and encryption registers, plus the station
 * MAC address and BSSID. With no interface present, the address
 * registers are cleared and defaults are written.
 */
int carl9170_set_operating_mode(struct ar9170 *ar)
{
	struct ieee80211_vif *vif;
	struct ath_common *common = &ar->common;
	u8 *mac_addr, *bssid;
	u32 cam_mode = AR9170_MAC_CAM_DEFAULTS;
	u32 enc_mode = AR9170_MAC_ENCRYPTION_DEFAULTS;
	u32 rx_ctrl = AR9170_MAC_RX_CTRL_DEAGG |
		      AR9170_MAC_RX_CTRL_SHORT_FILTER;
	u32 sniffer = AR9170_MAC_SNIFFER_DEFAULTS;
	int err = 0;

	rcu_read_lock();
	vif = carl9170_get_main_vif(ar);

	if (vif) {
		mac_addr = common->macaddr;
		bssid = common->curbssid;

		switch (vif->type) {
		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_ADHOC:
			cam_mode |= AR9170_MAC_CAM_IBSS;
			break;
		case NL80211_IFTYPE_AP:
			cam_mode |= AR9170_MAC_CAM_AP;

			/* iwlagn 802.11n STA Workaround */
			rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
			break;
		case NL80211_IFTYPE_WDS:
			cam_mode |= AR9170_MAC_CAM_AP_WDS;
			rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
			break;
		case NL80211_IFTYPE_STATION:
			cam_mode |= AR9170_MAC_CAM_STA;
			rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
			break;
		default:
			WARN(1, "Unsupported operation mode %x\n", vif->type);
			err = -EOPNOTSUPP;
			break;
		}
	} else {
		/* no interface: clear address/BSSID registers below */
		mac_addr = NULL;
		bssid = NULL;
	}
	rcu_read_unlock();

	if (err)
		return err;

	if (ar->rx_software_decryption)
		enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;

	if (ar->sniffer_enabled) {
		rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
		sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
		enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
	}

	err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
	if (err)
		return err;

	err = carl9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
	if (err)
		return err;

	carl9170_regwrite_begin(ar);
	carl9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
	carl9170_regwrite(AR9170_MAC_REG_CAM_MODE, cam_mode);
	carl9170_regwrite(AR9170_MAC_REG_ENCRYPTION, enc_mode);
	carl9170_regwrite(AR9170_MAC_REG_RX_CONTROL, rx_ctrl);
	carl9170_regwrite_finish();

	return carl9170_regwrite_result();
}
379
380int carl9170_set_hwretry_limit(struct ar9170 *ar, const unsigned int max_retry)
381{
382 u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
383
384 return carl9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
385}
386
/*
 * Program the beacon interval, DTIM period and PRETBTT timers from
 * the main interface's configuration. For beaconing modes the
 * interval is divided among the enabled beaconing vifs; for STA the
 * interval is used for powersave wakeups. Returns -ERANGE for
 * unusably small beacon intervals (< 15 Kus).
 */
int carl9170_set_beacon_timers(struct ar9170 *ar)
{
	struct ieee80211_vif *vif;
	u32 v = 0;
	u32 pretbtt = 0;

	rcu_read_lock();
	vif = carl9170_get_main_vif(ar);

	if (vif) {
		struct carl9170_vif_info *mvif;
		mvif = (void *) vif->drv_priv;

		if (mvif->enable_beacon && !WARN_ON(!ar->beacon_enabled)) {
			/* split the interval among beaconing interfaces */
			ar->global_beacon_int = vif->bss_conf.beacon_int /
						ar->beacon_enabled;

			SET_VAL(AR9170_MAC_BCN_DTIM, v,
				vif->bss_conf.dtim_period);

			switch (vif->type) {
			case NL80211_IFTYPE_MESH_POINT:
			case NL80211_IFTYPE_ADHOC:
				v |= AR9170_MAC_BCN_IBSS_MODE;
				break;
			case NL80211_IFTYPE_AP:
				v |= AR9170_MAC_BCN_AP_MODE;
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		} else if (vif->type == NL80211_IFTYPE_STATION) {
			ar->global_beacon_int = vif->bss_conf.beacon_int;

			SET_VAL(AR9170_MAC_BCN_DTIM, v,
				ar->hw->conf.ps_dtim_period);

			v |= AR9170_MAC_BCN_STA_PS |
			     AR9170_MAC_BCN_PWR_MGT;
		}

		if (ar->global_beacon_int) {
			if (ar->global_beacon_int < 15) {
				rcu_read_unlock();
				return -ERANGE;
			}

			/* wake up CARL9170_PRETBTT_KUS before each TBTT */
			ar->global_pretbtt = ar->global_beacon_int -
					     CARL9170_PRETBTT_KUS;
		} else {
			ar->global_pretbtt = 0;
		}
	} else {
		ar->global_beacon_int = 0;
		ar->global_pretbtt = 0;
	}

	rcu_read_unlock();

	SET_VAL(AR9170_MAC_BCN_PERIOD, v, ar->global_beacon_int);
	SET_VAL(AR9170_MAC_PRETBTT, pretbtt, ar->global_pretbtt);
	SET_VAL(AR9170_MAC_PRETBTT2, pretbtt, ar->global_pretbtt);

	carl9170_regwrite_begin(ar);
	carl9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
	carl9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
	carl9170_regwrite_finish();
	return carl9170_regwrite_result();
}
457
/*
 * Upload the next beacon into device memory. Beaconing vifs are
 * round-robined via ar->beacon_iter; only words that changed since
 * the previously uploaded beacon are (re)written, to keep the async
 * register-write traffic small. When "submit" is set, the firmware
 * is also told to start transmitting from the new buffer.
 *
 * Returns 0, -ENOMEM (no beacon skb), -EINVAL/-EMSGSIZE (beacon does
 * not fit the device buffer) or an async-write error.
 */
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
{
	struct sk_buff *skb;
	struct carl9170_vif_info *cvif;
	__le32 *data, *old = NULL;
	u32 word, off, addr, len;
	int i = 0, err = 0;

	rcu_read_lock();
	cvif = rcu_dereference(ar->beacon_iter);
retry:
	if (ar->vifs == 0 || !cvif)
		goto out_unlock;

	/* advance to the next active, beaconing vif (round-robin) */
	list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
		if (cvif->active && cvif->enable_beacon)
			goto found;
	}

	/* wrap around at most once (i guards against an endless loop) */
	if (!ar->beacon_enabled || i++)
		goto out_unlock;

	goto retry;

found:
	rcu_assign_pointer(ar->beacon_iter, cvif);

	skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
				       NULL, NULL);

	if (!skb) {
		err = -ENOMEM;
		goto out_unlock;
	}

	spin_lock_bh(&ar->beacon_lock);
	data = (__le32 *)skb->data;
	if (cvif->beacon)
		old = (__le32 *)cvif->beacon->data;

	/* each vif owns a fixed slice of the device beacon buffer */
	off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
	addr = ar->fw.beacon_addr + off;
	len = roundup(skb->len + FCS_LEN, 4);

	if ((off + len) > ar->fw.beacon_max_len) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "beacon does not "
				  "fit into device memory!\n");
		}

		spin_unlock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(skb);
		err = -EINVAL;
		goto out_unlock;
	}

	if (len > AR9170_MAC_BCN_LENGTH_MAX) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "no support for beacons "
				  "bigger than %d (yours:%d).\n",
				  AR9170_MAC_BCN_LENGTH_MAX, len);
		}

		spin_unlock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(skb);
		err = -EMSGSIZE;
		goto out_unlock;
	}

	carl9170_async_regwrite_begin(ar);

	/* XXX: use skb->cb info */
	if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
			((skb->len + FCS_LEN) << (3 + 16)) + 0x0400);
	} else {
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP,
			((skb->len + FCS_LEN) << 16) + 0x001b);
	}

	for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
		/*
		 * XXX: This accesses beyond skb data for up
		 * to the last 3 bytes!!
		 */

		/* skip words unchanged since the last upload */
		if (old && (data[i] == old[i]))
			continue;

		word = le32_to_cpu(data[i]);
		carl9170_async_regwrite(addr + 4 * i, word);
	}
	carl9170_async_regwrite_finish();

	dev_kfree_skb_any(cvif->beacon);
	cvif->beacon = NULL;

	err = carl9170_async_regwrite_result();
	if (!err)
		cvif->beacon = skb;
	spin_unlock_bh(&ar->beacon_lock);
	if (err)
		goto out_unlock;

	if (submit) {
		err = carl9170_bcn_ctrl(ar, cvif->id,
					CARL9170_BCN_CTRL_CAB_TRIGGER,
					addr, skb->len + FCS_LEN);

		if (err)
			goto out_unlock;
	}
out_unlock:
	rcu_read_unlock();
	return err;
}
574
575int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
576 const u8 ktype, const u8 keyidx, const u8 *keydata,
577 const int keylen)
578{
579 struct carl9170_set_key_cmd key = { };
580 static const u8 bcast[ETH_ALEN] = {
581 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
582
583 mac = mac ? : bcast;
584
585 key.user = cpu_to_le16(id);
586 key.keyId = cpu_to_le16(keyidx);
587 key.type = cpu_to_le16(ktype);
588 memcpy(&key.macAddr, mac, ETH_ALEN);
589 if (keydata)
590 memcpy(&key.key, keydata, keylen);
591
592 return carl9170_exec_cmd(ar, CARL9170_CMD_EKEY,
593 sizeof(key), (u8 *)&key, 0, NULL);
594}
595
596int carl9170_disable_key(struct ar9170 *ar, const u8 id)
597{
598 struct carl9170_disable_key_cmd key = { };
599
600 key.user = cpu_to_le16(id);
601
602 return carl9170_exec_cmd(ar, CARL9170_CMD_DKEY,
603 sizeof(key), (u8 *)&key, 0, NULL);
604}
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
new file mode 100644
index 000000000000..3cc99f3f7ab5
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -0,0 +1,1891 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <linux/random.h>
45#include <net/mac80211.h>
46#include <net/cfg80211.h>
47#include "hw.h"
48#include "carl9170.h"
49#include "cmd.h"
50
51static int modparam_nohwcrypt;
52module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54
/* non-static: checked elsewhere in the driver before enabling HT/AMPDU */
int modparam_noht;
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58
/* bitrate is in units of 100 kbit/s; hw_value packs the hardware
 * rate code (low nibble) and a tx-power index (upper nibble) */
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate) | (_txpidx) << 4, \
}

/* the four CCK rates first (2 GHz only), then the eight OFDM rates */
struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

/* 2 GHz uses the full table; 5 GHz skips the four CCK entries */
#define carl9170_g_ratetable	(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable	(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8
85
86/*
87 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88 * array in phy.c so that we don't have to do frequency lookups!
89 */
#define CHAN(_freq, _idx) { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 18, /* XXX */ \
}

/* hw_value indices must stay contiguous across both tables (see
 * the carl9170_phy_freq_params note above) */
static struct ieee80211_channel carl9170_2ghz_chantable[] = {
	CHAN(2412, 0),
	CHAN(2417, 1),
	CHAN(2422, 2),
	CHAN(2427, 3),
	CHAN(2432, 4),
	CHAN(2437, 5),
	CHAN(2442, 6),
	CHAN(2447, 7),
	CHAN(2452, 8),
	CHAN(2457, 9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

static struct ieee80211_channel carl9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN
151
/* HT capabilities shared by both bands (2-stream rx, 1-stream MCS32) */
#define CARL9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}

static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels	= carl9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates	= carl9170_g_ratetable,
	.n_bitrates	= carl9170_g_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels	= carl9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates	= carl9170_a_ratetable,
	.n_bitrates	= carl9170_a_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};
184
/*
 * Garbage-collect tx AMPDU sessions in SHUTDOWN state: unlink them
 * from the RCU list under the list lock, wait a grace period so no
 * reader still holds a reference, then flush each session's queue
 * (reporting the frames as not-acked) and free it.
 */
static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	rcu_read_lock();
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			/* tmp_list keeps the entry reachable for teardown */
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

	}
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();

	/* ensure no RCU reader can still see the removed entries */
	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;
		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}
219
/*
 * Flush tx: optionally drop everything still queued on the host side
 * (frames not yet uploaded to the device), then wait up to one second
 * for frames already handed to the device to complete or time out.
 */
static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
{
	if (drop_queued) {
		int i;

		/*
		 * We can only drop frames which have not been uploaded
		 * to the device yet.
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
				struct ieee80211_tx_info *info;

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				/* report as not acknowledged */
				carl9170_tx_status(ar, skb, false);
			}
		}
	}

	/* Wait for all other outstanding frames to timeout. */
	if (atomic_read(&ar->tx_total_queued))
		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
}
249
/*
 * Suspend all active block-ack (AMPDU) sessions and drop their
 * queued frames. The frames are collected under the locks and
 * status-reported afterwards, outside any spinlock.
 */
static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	rcu_read_lock();
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);
	rcu_read_unlock();

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}
276
/*
 * Bring all tx state back to a clean slate: kill AMPDU sessions,
 * flush pending/in-flight frames, drop frames awaiting status,
 * free queued beacons and reset all tx statistics and counters.
 * Used on start/restart.
 */
static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			/* hold a reference while the lock is dropped */
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	/* mark every firmware memory block as free */
	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}
	rcu_read_unlock();

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}
329
/*
 * Initialize one EDCA parameter set: AIFS, CWmin/CWmax and TXOP.
 * Implemented as a macro because it assigns directly to the given
 * queue lvalue.
 */
#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
do { \
	queue.aifs = ai_fs; \
	queue.cw_min = cwmin; \
	queue.cw_max = cwmax; \
	queue.txop = _txop; \
} while (0)
337
/*
 * mac80211 start callback: bring the device from IDLE to STARTED.
 *
 * Resets all software queue state and driver defaults, brings up the
 * USB transport, programs the MAC and RX filters, kicks RX DMA and
 * wipes the hardware key cache. Any failure leaves the device IDLE
 * and is returned to mac80211.
 */
static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT */
	CARL9170_FILL_QUEUE(ar->edcf[1], 2, 7, 15, 94); /* VIDEO */
	CARL9170_FILL_QUEUE(ar->edcf[2], 2, 3, 7, 47); /* VOICE */
	CARL9170_FILL_QUEUE(ar->edcf[3], 7, 15, 1023, 0); /* BACKGROUND */
	CARL9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */

	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;
	ar->rx_software_decryption = false;
	ar->disable_offload = false;

	/* reset per-queue stall tracking */
	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	/* bring up the USB transport */
	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	/* default RX filter, if the firmware supports hardware filtering */
	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	/* kick the RX queue DMA engine */
	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	/* transition IDLE -> STARTED */
	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}
423
/* Cancel all deferred work items, waiting for any running instance. */
static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ampdu_work);
}
433
/*
 * mac80211 stop callback: transition STARTED -> IDLE, halt RX DMA and
 * the USB transport, drop all queued frames and cancel the workers.
 */
static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		rcu_assign_pointer(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop DMA */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	carl9170_cancel_worker(ar);
}
458
459static void carl9170_restart_work(struct work_struct *work)
460{
461 struct ar9170 *ar = container_of(work, struct ar9170,
462 restart_work);
463 int err;
464
465 ar->usedkeys = 0;
466 ar->filter_state = 0;
467 carl9170_cancel_worker(ar);
468
469 mutex_lock(&ar->mutex);
470 err = carl9170_usb_restart(ar);
471 if (net_ratelimit()) {
472 if (err) {
473 dev_err(&ar->udev->dev, "Failed to restart device "
474 " (%d).\n", err);
475 } else {
476 dev_info(&ar->udev->dev, "device restarted "
477 "successfully.\n");
478 }
479 }
480
481 carl9170_zap_queues(ar);
482 mutex_unlock(&ar->mutex);
483 if (!err) {
484 ar->restart_counter++;
485 atomic_set(&ar->pending_restarts, 0);
486
487 ieee80211_restart_hw(ar->hw);
488 } else {
489 /*
490 * The reset was unsuccessful and the device seems to
491 * be dead. But there's still one option: a low-level
492 * usb subsystem reset...
493 */
494
495 carl9170_usb_reset(ar);
496 }
497}
498
499void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
500{
501 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
502
503 /*
504 * Sometimes, an error can trigger several different reset events.
505 * By ignoring these *surplus* reset events, the device won't be
506 * killed again, right after it has recovered.
507 */
508 if (atomic_inc_return(&ar->pending_restarts) > 1) {
509 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
510 return;
511 }
512
513 ieee80211_stop_queues(ar->hw);
514
515 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
516
517 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
518 !WARN_ON(r >= __CARL9170_RR_LAST))
519 ar->last_reason = r;
520
521 if (!ar->registered)
522 return;
523
524 if (IS_ACCEPTING_CMD(ar) && !ar->needs_full_reset)
525 ieee80211_queue_work(ar->hw, &ar->restart_work);
526 else
527 carl9170_usb_reset(ar);
528
529 /*
530 * At this point, the device instance might have vanished/disabled.
531 * So, don't put any code which access the ar9170 struct
532 * without proper protection.
533 */
534}
535
536static int carl9170_init_interface(struct ar9170 *ar,
537 struct ieee80211_vif *vif)
538{
539 struct ath_common *common = &ar->common;
540 int err;
541
542 if (!vif) {
543 WARN_ON_ONCE(IS_STARTED(ar));
544 return 0;
545 }
546
547 memcpy(common->macaddr, vif->addr, ETH_ALEN);
548
549 if (modparam_nohwcrypt ||
550 ((vif->type != NL80211_IFTYPE_STATION) &&
551 (vif->type != NL80211_IFTYPE_AP))) {
552 ar->rx_software_decryption = true;
553 ar->disable_offload = true;
554 }
555
556 err = carl9170_set_operating_mode(ar);
557 return err;
558}
559
/*
 * mac80211 add_interface callback.
 *
 * Enforces the supported interface combinations (a STA main interface
 * only allows further STAs; an AP main interface allows STA/WDS/AP),
 * allocates a vif id from the firmware's bitmap and programs either
 * the main MAC address or a virtual MAC for secondary interfaces.
 *
 * Note the asymmetric RCU handling: the read-side lock taken at the
 * top is released on every path before "unlock" is reached.
 */
static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);
	rcu_read_lock();
	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	main_vif = carl9170_get_main_vif(ar);

	/* check the requested type against the main interface's type */
	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			err = -EBUSY;
			rcu_read_unlock();

			goto unlock;

		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP))
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		default:
			rcu_read_unlock();
			goto unlock;
		}
	}

	/* grab a free slot from the firmware's vif bitmap */
	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	if (vif_id < 0) {
		rcu_read_unlock();

		err = -ENOSPC;
		goto unlock;
	}

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;
	ar->vifs++;
	list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	if (carl9170_get_main_vif(ar) == vif) {
		/* this is (or became) the main interface */
		rcu_assign_pointer(ar->beacon_iter, vif_priv);
		rcu_read_unlock();

		err = carl9170_init_interface(ar, vif);
		if (err)
			goto unlock;
	} else {
		/* secondary interfaces get a virtual MAC slot */
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
		rcu_read_unlock();

		if (err)
			goto unlock;
	}

unlock:
	if (err && (vif_id != -1)) {
		/* roll back the partially registered interface */
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		ar->vifs--;
		rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		synchronize_rcu();
	} else {
		/* powersave is disabled while multiple vifs are active */
		if (ar->vifs > 1)
			ar->ps.off_override |= PS_OFF_VIF;

		mutex_unlock(&ar->mutex);
	}

	return err;
}
667
/*
 * mac80211 remove_interface callback.
 *
 * Unlinks the interface from the driver's vif list. If the removed
 * vif was the main interface, the next remaining one (if any) is
 * promoted by re-running the interface init. The final
 * synchronize_rcu() ensures no reader still sees the removed vif.
 */
static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	ar->vifs--;

	rcu_read_lock();
	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	rcu_assign_pointer(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		rcu_read_unlock();

		if (ar->vifs) {
			/* promote the next interface to main */
			WARN_ON(carl9170_init_interface(ar,
				carl9170_get_main_vif(ar)));
		} else {
			carl9170_set_operating_mode(ar);
		}
	} else {
		rcu_read_unlock();

		/* clear the virtual MAC slot of the secondary interface */
		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	/* a single remaining vif re-enables powersave */
	if (ar->vifs == 1)
		ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);

	synchronize_rcu();
}
729
/* Defer a powersave-state re-evaluation to process context (ps_work). */
void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}
734
/*
 * Re-evaluate and apply the powersave state. PS is only entered when
 * no off_override bit is set and mac80211 requested IEEE80211_CONF_PS.
 * Tracks the sleep/wake timestamps and the time spent sleeping.
 *
 * Caller must hold ar->mutex.
 */
static int carl9170_ps_update(struct ar9170 *ar)
{
	bool ps = false;
	int err = 0;

	if (!ar->ps.off_override)
		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);

	if (ps != ar->ps.state) {
		err = carl9170_powersave(ar, ps);
		if (err)
			return err;

		if (ar->ps.state && !ps) {
			/* just woke up: account the time we slept */
			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
				ar->ps.last_action);
		}

		if (ps)
			ar->ps.last_slept = jiffies;

		ar->ps.last_action = jiffies;
		ar->ps.state = ps;
	}

	return 0;
}
763
/* Process-context worker: applies any pending powersave change. */
static void carl9170_ps_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ps_work);
	mutex_lock(&ar->mutex);
	if (IS_STARTED(ar))
		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
}
773
774
/*
 * mac80211 config callback: apply powersave and channel changes.
 * Listen interval, TX power and SMPS changes are accepted but not
 * yet implemented (TODO markers below).
 */
static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.channel,
			hw->conf.channel_type, CARL9170_RFI_NONE);
		if (err)
			goto out;

		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}
826
827static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
828 struct netdev_hw_addr_list *mc_list)
829{
830 struct netdev_hw_addr *ha;
831 u64 mchash;
832
833 /* always get broadcast frames */
834 mchash = 1ULL << (0xff >> 2);
835
836 netdev_hw_addr_list_for_each(ha, mc_list)
837 mchash |= 1ULL << (ha->addr[5] >> 2);
838
839 return mchash;
840}
841
/*
 * mac80211 configure_filter callback: apply the multicast hash, the
 * sniffer/promiscuous operating mode and - when the firmware supports
 * it - the hardware RX filter bits.
 */
static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	/* translate mac80211 FIF_* bits into hardware RX filter bits */
	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		u32 rx_filter = 0;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}
898
899
/*
 * mac80211 bss_info_changed callback.
 *
 * Beacon-related changes are handled for every interface; BSSID,
 * association, ERP slot time and basic-rate changes are only applied
 * when they come from the main interface.
 */
static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		/* count active interfaces that want to beacon */
		vif_priv->enable_beacon = bss_conf->enable_beacon;
		rcu_read_lock();
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;

		}
		rcu_read_unlock();

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		/* secondary interfaces inherit the main vif's timing */
		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * Reject beacon_int * dtim_period combinations that come
		 * close to the stuck-queue timeout, otherwise buffered
		 * broadcast traffic could trigger false stall detections.
		 * Therefore a hard limit for the broadcast traffic should
		 * prevent false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
		if (err)
			goto out;
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * master interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}
1008
/*
 * Read the 64-bit hardware TSF via a firmware command.
 * Returns 0 when the command fails (with a WARN).
 */
static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}
1024
/*
 * mac80211 set_key callback: program or remove a hardware crypto key.
 *
 * Hardware offload is only used for the main STA/AP interface; every
 * other configuration jumps to err_softw, which enables software
 * decryption and returns -ENOSPC (mac80211 then falls back to
 * software crypto for this key).
 */
static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/*
	 * We have to fall back to software encryption, whenever
	 * the user choose to participates in an IBSS or is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high througput speed in 802.11n networks.
	 */

	if (!is_main_vif(ar, vif))
		goto err_softw;

	/*
	 * While the hardware supports *catch-all* key, for offloading
	 * group-key en-/de-cryption. The way of how the hardware
	 * decides which keyId maps to which key, remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* translate the cipher suite into the hardware's algorithm id */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			/* group keys occupy the slots above 64 */
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			/* find a free pairwise key slot (0..63) */
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			/* TKIP needs the MIC key uploaded separately */
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * hardware is not capable generating MMIC
			 * of fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			/* wipe the group-key slot before disabling it */
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}

		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	/* enable software decryption and tell mac80211 to handle the key */
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}
1165
1166static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1167 struct ieee80211_vif *vif,
1168 struct ieee80211_sta *sta)
1169{
1170 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1171 unsigned int i;
1172
1173 if (sta->ht_cap.ht_supported) {
1174 if (sta->ht_cap.ampdu_density > 6) {
1175 /*
1176 * HW does support 16us AMPDU density.
1177 * No HT-Xmit for station.
1178 */
1179
1180 return 0;
1181 }
1182
1183 for (i = 0; i < CARL9170_NUM_TID; i++)
1184 rcu_assign_pointer(sta_info->agg[i], NULL);
1185
1186 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1187 sta_info->ht_sta = true;
1188 }
1189
1190 return 0;
1191}
1192
/*
 * mac80211 sta_remove callback: shut down every aggregation session
 * of the departing HT station and trigger the ampdu garbage
 * collector to reclaim the tid_info structures.
 */
static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {

		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			rcu_assign_pointer(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			/* mark the session for teardown by the gc */
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}
1230
1231static int carl9170_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1232 const struct ieee80211_tx_queue_params *param)
1233{
1234 struct ar9170 *ar = hw->priv;
1235 int ret;
1236
1237 mutex_lock(&ar->mutex);
1238 if (queue < ar->hw->queues) {
1239 memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1240 ret = carl9170_set_qos(ar);
1241 } else {
1242 ret = -EINVAL;
1243 }
1244
1245 mutex_unlock(&ar->mutex);
1246 return ret;
1247}
1248
/* Worker: run the A-MPDU session garbage collector in process context. */
static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);
	mutex_unlock(&ar->mutex);
}
1261
/*
 * mac80211 ampdu_action callback: manage the TX aggregation session
 * state machine (tid_info) for a station/TID pair. RX aggregation is
 * handled entirely by the hardware.
 */
static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	if (modparam_noht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		rcu_read_lock();
		if (rcu_dereference(sta_info->agg[tid])) {
			/* a session for this TID already exists */
			rcu_read_unlock();
			return -EBUSY;
		}

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		/* seed all sequence counters with the starting ssn */
		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
		rcu_read_unlock();

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP:
		/* mark the session for teardown; the gc worker frees it */
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		rcu_assign_pointer(sta_info->agg[tid], NULL);
		rcu_read_unlock();

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;

		if (tid_info) {
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();

		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
1358
1359#ifdef CONFIG_CARL9170_WPC
/*
 * Register an input device for the WPS push-button, when the firmware
 * advertises the CARL9170_WPS_BUTTON feature, so presses can be
 * reported as KEY_WPS_BUTTON events. Returns 0 when no button exists.
 */
static int carl9170_register_wps_button(struct ar9170 *ar)
{
	struct input_dev *input;
	int err;

	if (!(ar->features & CARL9170_WPS_BUTTON))
		return 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	input->name = ar->wps.name;
	input->phys = ar->wps.phys;
	input->id.bustype = BUS_USB;
	input->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(input);
	if (err) {
		input_free_device(input);
		return err;
	}

	ar->wps.pbc = input;
	return 0;
}
1394#endif /* CONFIG_CARL9170_WPC */
1395
1396static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1397 struct survey_info *survey)
1398{
1399 struct ar9170 *ar = hw->priv;
1400 int err;
1401
1402 if (idx != 0)
1403 return -ENOENT;
1404
1405 mutex_lock(&ar->mutex);
1406 err = carl9170_get_noisefloor(ar);
1407 mutex_unlock(&ar->mutex);
1408 if (err)
1409 return err;
1410
1411 survey->channel = ar->channel;
1412 survey->filled = SURVEY_INFO_NOISE_DBM;
1413 survey->noise = ar->noise[0];
1414 return 0;
1415}
1416
/*
 * mac80211 flush callback: flush the cab (content-after-beacon)
 * queue of every active interface, then wait for - or drop, when
 * requested - all remaining pending frames.
 */
static void carl9170_op_flush(struct ieee80211_hw *hw, bool drop)
{
	struct ar9170 *ar = hw->priv;
	unsigned int vid;

	mutex_lock(&ar->mutex);
	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
		carl9170_flush_cab(ar, vid);

	carl9170_flush(ar, drop);
	mutex_unlock(&ar->mutex);
}
1429
1430static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1431 struct ieee80211_low_level_stats *stats)
1432{
1433 struct ar9170 *ar = hw->priv;
1434
1435 memset(stats, 0, sizeof(*stats));
1436 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1437 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1438 return 0;
1439}
1440
/*
 * mac80211 sta_notify callback: handle peer powersave transitions.
 *
 * SLEEP: hand all frames queued for this station (both the per-TID
 * aggregation queues and the pending queues) back to mac80211, which
 * retransmits them once the peer wakes up. AWAKE: resume every
 * suspended BA session.
 */
static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head free;
	int i;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		/*
		 * Since the peer is no longer listening, we have to return
		 * as many SKBs as possible back to the mac80211 stack.
		 * It will deal with the retry procedure, once the peer
		 * has become available again.
		 *
		 * NB: Ideally, the driver should return the all frames in
		 * the correct, ascending order. However, I think that this
		 * functionality should be implemented in the stack and not
		 * here...
		 */

		__skb_queue_head_init(&free);

		if (sta->ht_cap.ht_supported) {
			/* suspend BA sessions and collect their frames */
			rcu_read_lock();
			for (i = 0; i < CARL9170_NUM_TID; i++) {
				struct carl9170_sta_tid *tid_info;

				tid_info = rcu_dereference(sta_info->agg[i]);

				if (!tid_info)
					continue;

				spin_lock_bh(&ar->tx_ampdu_list_lock);
				if (tid_info->state >
				    CARL9170_TID_STATE_SUSPEND)
					tid_info->state =
						CARL9170_TID_STATE_SUSPEND;
				spin_unlock_bh(&ar->tx_ampdu_list_lock);

				spin_lock_bh(&tid_info->lock);
				while ((skb = __skb_dequeue(&tid_info->queue)))
					__skb_queue_tail(&free, skb);
				spin_unlock_bh(&tid_info->lock);
			}
			rcu_read_unlock();
		}

		/* pull this station's frames out of the pending queues */
		for (i = 0; i < ar->hw->queues; i++) {
			spin_lock_bh(&ar->tx_pending[i].lock);
			skb_queue_walk_safe(&ar->tx_pending[i], skb, tmp) {
				struct _carl9170_tx_superframe *super;
				struct ieee80211_hdr *hdr;
				struct ieee80211_tx_info *info;

				super = (void *) skb->data;
				hdr = (void *) super->frame_data;

				if (compare_ether_addr(hdr->addr1, sta->addr))
					continue;

				__skb_unlink(skb, &ar->tx_pending[i]);

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);
			}
			spin_unlock_bh(&ar->tx_pending[i].lock);
		}

		while ((skb = __skb_dequeue(&free)))
			carl9170_tx_status(ar, skb, false);

		break;

	case STA_NOTIFY_AWAKE:
		if (!sta->ht_cap.ht_supported)
			return;

		/* resume all suspended BA sessions */
		rcu_read_lock();
		for (i = 0; i < CARL9170_NUM_TID; i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);

			if (!tid_info)
				continue;

			if ((tid_info->state == CARL9170_TID_STATE_SUSPEND))
				tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();
		break;
	}
}
1542
/* mac80211 callback table for the carl9170 driver. */
static const struct ieee80211_ops carl9170_ops = {
	.start			= carl9170_op_start,
	.stop			= carl9170_op_stop,
	.tx			= carl9170_op_tx,
	.flush			= carl9170_op_flush,
	.add_interface		= carl9170_op_add_interface,
	.remove_interface	= carl9170_op_remove_interface,
	.config			= carl9170_op_config,
	.prepare_multicast	= carl9170_op_prepare_multicast,
	.configure_filter	= carl9170_op_configure_filter,
	.conf_tx		= carl9170_op_conf_tx,
	.bss_info_changed	= carl9170_op_bss_info_changed,
	.get_tsf		= carl9170_op_get_tsf,
	.set_key		= carl9170_op_set_key,
	.sta_add		= carl9170_op_sta_add,
	.sta_remove		= carl9170_op_sta_remove,
	.sta_notify		= carl9170_op_sta_notify,
	.get_survey		= carl9170_op_get_survey,
	.get_stats		= carl9170_op_get_stats,
	.ampdu_action		= carl9170_op_ampdu_action,
};
1564
/*
 * Allocate and pre-initialize the per-device state.
 *
 * @priv_size: number of extra bytes the bus glue wants appended to
 *	struct ar9170 (passed through to ieee80211_alloc_hw()).
 *
 * Returns the new struct ar9170 on success, ERR_PTR(-ENOMEM) on
 * allocation failure. The counterpart that releases everything
 * set up here is carl9170_free().
 */
void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	/* no partial PLCP header is buffered from a previous rx stream */
	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);
	ar->vifs = 0;
	/* one status and one pending queue per hardware TX queue */
	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);
	}
	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	/* the iterator initially points at the list head itself */
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/*
	 * Note:
	 * IBSS/ADHOC and AP mode are only enabled, if the firmware
	 * supports these modes. The code which will add the
	 * additional interface_modes is in fw.c.
	 */
	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);

	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_SIGNAL_DBM;

	if (!modparam_noht) {
		/*
		 * see the comment above, why we allow the user
		 * to disable HT by a module parameter.
		 */
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
	return ar;

err_nomem:
	/* kfree_skb(NULL) is a no-op, so this is safe on both paths */
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
1667
/*
 * Download the device's EEPROM image into ar->eeprom.
 *
 * The content is fetched through the firmware's register-read command,
 * RW consecutive 32-bit words (RB bytes) per round-trip. Returns 0 on
 * success or the first error from carl9170_exec_cmd().
 */
static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	/* the word-wise copy below requires a 4-byte aligned size */
	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	/* the offset vector has to fit into one command buffer */
	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

	for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
		/* build the list of RW consecutive register offsets */
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
						 RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}
1700
1701static int carl9170_parse_eeprom(struct ar9170 *ar)
1702{
1703 struct ath_regulatory *regulatory = &ar->common.regulatory;
1704 unsigned int rx_streams, tx_streams, tx_params = 0;
1705 int bands = 0;
1706
1707 if (ar->eeprom.length == cpu_to_le16(0xffff))
1708 return -ENODATA;
1709
1710 rx_streams = hweight8(ar->eeprom.rx_mask);
1711 tx_streams = hweight8(ar->eeprom.tx_mask);
1712
1713 if (rx_streams != tx_streams) {
1714 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1715
1716 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1717 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1718
1719 tx_params = (tx_streams - 1) <<
1720 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1721
1722 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1723 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1724 }
1725
1726 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1727 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1728 &carl9170_band_2GHz;
1729 bands++;
1730 }
1731 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1732 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1733 &carl9170_band_5GHz;
1734 bands++;
1735 }
1736
1737 /*
1738 * I measured this, a bandswitch takes roughly
1739 * 135 ms and a frequency switch about 80.
1740 *
1741 * FIXME: measure these values again once EEPROM settings
1742 * are used, that will influence them!
1743 */
1744 if (bands == 2)
1745 ar->hw->channel_change_time = 135 * 1000;
1746 else
1747 ar->hw->channel_change_time = 80 * 1000;
1748
1749 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1750 regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
1751
1752 /* second part of wiphy init */
1753 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1754
1755 return bands ? 0 : -EINVAL;
1756}
1757
1758static int carl9170_reg_notifier(struct wiphy *wiphy,
1759 struct regulatory_request *request)
1760{
1761 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1762 struct ar9170 *ar = hw->priv;
1763
1764 return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1765}
1766
/*
 * Bring the device online: read and apply the EEPROM, set up the
 * regulatory state, register with mac80211 and attach the optional
 * LED / debugfs / WPS-button helpers.
 *
 * Returns 0 on success or a negative errno. On failure after the
 * mac80211 registration the device is torn down again via
 * carl9170_unregister(); earlier failures leave cleanup (e.g. of
 * ar->mem_bitmap) to carl9170_free().
 */
int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	/* a second registration of the same device is a driver bug */
	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
				 sizeof(unsigned long), GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_fw_fix_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	/* the user asked to run without HT support */
	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	carl9170_unregister(ar);
	return err;
}
1848
/*
 * Undo carl9170_register(): detach the optional helpers, stop all
 * pending work and unregister from mac80211. Safe to call more than
 * once — the ar->registered flag makes repeated calls a no-op.
 */
void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

	/* make sure no deferred work runs past this point */
	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}
1876
1877void carl9170_free(struct ar9170 *ar)
1878{
1879 WARN_ON(ar->registered);
1880 WARN_ON(IS_INITIALIZED(ar));
1881
1882 kfree_skb(ar->rx_failover);
1883 ar->rx_failover = NULL;
1884
1885 kfree(ar->mem_bitmap);
1886 ar->mem_bitmap = NULL;
1887
1888 mutex_destroy(&ar->mutex);
1889
1890 ieee80211_free_hw(ar->hw);
1891}
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
new file mode 100644
index 000000000000..89deca37a988
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -0,0 +1,1810 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * PHY and RF code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#include <linux/bitrev.h>
40#include "carl9170.h"
41#include "cmd.h"
42#include "phy.h"
43
/*
 * Preset every per-rate TX power register to its maximum value
 * (0x7f for the rate-max register, 0x3f per byte for the rate
 * groups). Returns the result of the buffered register write.
 */
static int carl9170_init_power_cal(struct ar9170 *ar)
{
	carl9170_regwrite_begin(ar);

	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE_MAX, 0x7f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE1, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE2, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE3, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE4, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE5, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE6, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE7, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE8, 0x3f3f3f3f);
	carl9170_regwrite(AR9170_PHY_REG_POWER_TX_RATE9, 0x3f3f3f3f);

	carl9170_regwrite_finish();
	return carl9170_regwrite_result();
}
62
/*
 * One PHY initialization entry: a register address plus its initial
 * value for each supported band/bandwidth combination.
 */
struct carl9170_phy_init {
	u32 reg, _5ghz_20, _5ghz_40, _2ghz_40, _2ghz_20;
};

/*
 * AR5416 PHY initialization table. Columns (left to right): register
 * address, value for 5GHz/20MHz, 5GHz/40MHz, 2.4GHz/40MHz and
 * 2.4GHz/20MHz — the order defined by struct carl9170_phy_init.
 * Looked up per band/bandwidth by carl9170_def_val() below.
 */
static struct carl9170_phy_init ar5416_phy_init[] = {
	{ 0x1c5800, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
	{ 0x1c5804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, },
	{ 0x1c5808, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c580c, 0xad848e19, 0xad848e19, 0xad848e19, 0xad848e19, },
	{ 0x1c5810, 0x7d14e000, 0x7d14e000, 0x7d14e000, 0x7d14e000, },
	{ 0x1c5814, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, 0x9c0a9f6b, },
	{ 0x1c5818, 0x00000090, 0x00000090, 0x00000090, 0x00000090, },
	{ 0x1c581c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, },
	{ 0x1c5824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
	{ 0x1c5828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, },
	{ 0x1c582c, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
	{ 0x1c5830, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, },
	{ 0x1c5838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
	{ 0x1c583c, 0x00200400, 0x00200400, 0x00200400, 0x00200400, },
	{ 0x1c5840, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e, },
	{ 0x1c5844, 0x1372161e, 0x13721c1e, 0x13721c24, 0x137216a4, },
	{ 0x1c5848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, },
	{ 0x1c584c, 0x1284233c, 0x1284233c, 0x1284233c, 0x1284233c, },
	{ 0x1c5850, 0x6c48b4e4, 0x6d48b4e4, 0x6d48b0e4, 0x6c48b0e4, },
	{ 0x1c5854, 0x00000859, 0x00000859, 0x00000859, 0x00000859, },
	{ 0x1c5858, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, },
	{ 0x1c585c, 0x31395c5e, 0x3139605e, 0x3139605e, 0x31395c5e, },
	{ 0x1c5860, 0x0004dd10, 0x0004dd10, 0x0004dd20, 0x0004dd20, },
	{ 0x1c5864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
	{ 0x1c5868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, },
	{ 0x1c586c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, },
	{ 0x1c5900, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5904, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5908, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c590c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, },
	{ 0x1c5918, 0x00000118, 0x00000230, 0x00000268, 0x00000134, },
	{ 0x1c591c, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff, },
	{ 0x1c5920, 0x0510081c, 0x0510081c, 0x0510001c, 0x0510001c, },
	{ 0x1c5924, 0xd0058a15, 0xd0058a15, 0xd0058a15, 0xd0058a15, },
	{ 0x1c5928, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
	{ 0x1c592c, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
	{ 0x1c5934, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c5938, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c593c, 0x0000007f, 0x0000007f, 0x0000007f, 0x0000007f, },
	{ 0x1c5944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, },
	{ 0x1c5948, 0x9280b212, 0x9280b212, 0x9280b212, 0x9280b212, },
	{ 0x1c594c, 0x00020028, 0x00020028, 0x00020028, 0x00020028, },
	{ 0x1c5954, 0x5d50e188, 0x5d50e188, 0x5d50e188, 0x5d50e188, },
	{ 0x1c5958, 0x00081fff, 0x00081fff, 0x00081fff, 0x00081fff, },
	{ 0x1c5960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
	{ 0x1c5964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, },
	{ 0x1c5970, 0x190fb515, 0x190fb515, 0x190fb515, 0x190fb515, },
	{ 0x1c5974, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5978, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
	{ 0x1c597c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5980, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5984, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5988, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c598c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5990, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5994, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5998, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c599c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c59a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c59a4, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
	{ 0x1c59a8, 0x001fff00, 0x001fff00, 0x001fff00, 0x001fff00, },
	{ 0x1c59ac, 0x006f00c4, 0x006f00c4, 0x006f00c4, 0x006f00c4, },
	{ 0x1c59b0, 0x03051000, 0x03051000, 0x03051000, 0x03051000, },
	{ 0x1c59b4, 0x00000820, 0x00000820, 0x00000820, 0x00000820, },
	{ 0x1c59bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
	{ 0x1c59c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, },
	{ 0x1c59c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, },
	{ 0x1c59c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, },
	{ 0x1c59cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, },
	{ 0x1c59d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, },
	{ 0x1c59d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c59d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c59dc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c59e0, 0x00000200, 0x00000200, 0x00000200, 0x00000200, },
	{ 0x1c59e4, 0x64646464, 0x64646464, 0x64646464, 0x64646464, },
	{ 0x1c59e8, 0x3c787878, 0x3c787878, 0x3c787878, 0x3c787878, },
	{ 0x1c59ec, 0x000000aa, 0x000000aa, 0x000000aa, 0x000000aa, },
	{ 0x1c59f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c59fc, 0x00001042, 0x00001042, 0x00001042, 0x00001042, },
	{ 0x1c5a00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5a04, 0x00000040, 0x00000040, 0x00000040, 0x00000040, },
	{ 0x1c5a08, 0x00000080, 0x00000080, 0x00000080, 0x00000080, },
	{ 0x1c5a0c, 0x000001a1, 0x000001a1, 0x00000141, 0x00000141, },
	{ 0x1c5a10, 0x000001e1, 0x000001e1, 0x00000181, 0x00000181, },
	{ 0x1c5a14, 0x00000021, 0x00000021, 0x000001c1, 0x000001c1, },
	{ 0x1c5a18, 0x00000061, 0x00000061, 0x00000001, 0x00000001, },
	{ 0x1c5a1c, 0x00000168, 0x00000168, 0x00000041, 0x00000041, },
	{ 0x1c5a20, 0x000001a8, 0x000001a8, 0x000001a8, 0x000001a8, },
	{ 0x1c5a24, 0x000001e8, 0x000001e8, 0x000001e8, 0x000001e8, },
	{ 0x1c5a28, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
	{ 0x1c5a2c, 0x00000068, 0x00000068, 0x00000068, 0x00000068, },
	{ 0x1c5a30, 0x00000189, 0x00000189, 0x000000a8, 0x000000a8, },
	{ 0x1c5a34, 0x000001c9, 0x000001c9, 0x00000169, 0x00000169, },
	{ 0x1c5a38, 0x00000009, 0x00000009, 0x000001a9, 0x000001a9, },
	{ 0x1c5a3c, 0x00000049, 0x00000049, 0x000001e9, 0x000001e9, },
	{ 0x1c5a40, 0x00000089, 0x00000089, 0x00000029, 0x00000029, },
	{ 0x1c5a44, 0x00000170, 0x00000170, 0x00000069, 0x00000069, },
	{ 0x1c5a48, 0x000001b0, 0x000001b0, 0x00000190, 0x00000190, },
	{ 0x1c5a4c, 0x000001f0, 0x000001f0, 0x000001d0, 0x000001d0, },
	{ 0x1c5a50, 0x00000030, 0x00000030, 0x00000010, 0x00000010, },
	{ 0x1c5a54, 0x00000070, 0x00000070, 0x00000050, 0x00000050, },
	{ 0x1c5a58, 0x00000191, 0x00000191, 0x00000090, 0x00000090, },
	{ 0x1c5a5c, 0x000001d1, 0x000001d1, 0x00000151, 0x00000151, },
	{ 0x1c5a60, 0x00000011, 0x00000011, 0x00000191, 0x00000191, },
	{ 0x1c5a64, 0x00000051, 0x00000051, 0x000001d1, 0x000001d1, },
	{ 0x1c5a68, 0x00000091, 0x00000091, 0x00000011, 0x00000011, },
	{ 0x1c5a6c, 0x000001b8, 0x000001b8, 0x00000051, 0x00000051, },
	{ 0x1c5a70, 0x000001f8, 0x000001f8, 0x00000198, 0x00000198, },
	{ 0x1c5a74, 0x00000038, 0x00000038, 0x000001d8, 0x000001d8, },
	{ 0x1c5a78, 0x00000078, 0x00000078, 0x00000018, 0x00000018, },
	{ 0x1c5a7c, 0x00000199, 0x00000199, 0x00000058, 0x00000058, },
	{ 0x1c5a80, 0x000001d9, 0x000001d9, 0x00000098, 0x00000098, },
	{ 0x1c5a84, 0x00000019, 0x00000019, 0x00000159, 0x00000159, },
	{ 0x1c5a88, 0x00000059, 0x00000059, 0x00000199, 0x00000199, },
	{ 0x1c5a8c, 0x00000099, 0x00000099, 0x000001d9, 0x000001d9, },
	{ 0x1c5a90, 0x000000d9, 0x000000d9, 0x00000019, 0x00000019, },
	{ 0x1c5a94, 0x000000f9, 0x000000f9, 0x00000059, 0x00000059, },
	{ 0x1c5a98, 0x000000f9, 0x000000f9, 0x00000099, 0x00000099, },
	{ 0x1c5a9c, 0x000000f9, 0x000000f9, 0x000000d9, 0x000000d9, },
	{ 0x1c5aa0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5aa4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5aa8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5aac, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ab0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ab4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ab8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5abc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ac0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ac4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ac8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5acc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ad0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ad4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ad8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5adc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ae0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ae4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5ae8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5aec, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5af0, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5af4, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5af8, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5afc, 0x000000f9, 0x000000f9, 0x000000f9, 0x000000f9, },
	{ 0x1c5b00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5b04, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
	{ 0x1c5b08, 0x00000002, 0x00000002, 0x00000002, 0x00000002, },
	{ 0x1c5b0c, 0x00000003, 0x00000003, 0x00000003, 0x00000003, },
	{ 0x1c5b10, 0x00000004, 0x00000004, 0x00000004, 0x00000004, },
	{ 0x1c5b14, 0x00000005, 0x00000005, 0x00000005, 0x00000005, },
	{ 0x1c5b18, 0x00000008, 0x00000008, 0x00000008, 0x00000008, },
	{ 0x1c5b1c, 0x00000009, 0x00000009, 0x00000009, 0x00000009, },
	{ 0x1c5b20, 0x0000000a, 0x0000000a, 0x0000000a, 0x0000000a, },
	{ 0x1c5b24, 0x0000000b, 0x0000000b, 0x0000000b, 0x0000000b, },
	{ 0x1c5b28, 0x0000000c, 0x0000000c, 0x0000000c, 0x0000000c, },
	{ 0x1c5b2c, 0x0000000d, 0x0000000d, 0x0000000d, 0x0000000d, },
	{ 0x1c5b30, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
	{ 0x1c5b34, 0x00000011, 0x00000011, 0x00000011, 0x00000011, },
	{ 0x1c5b38, 0x00000012, 0x00000012, 0x00000012, 0x00000012, },
	{ 0x1c5b3c, 0x00000013, 0x00000013, 0x00000013, 0x00000013, },
	{ 0x1c5b40, 0x00000014, 0x00000014, 0x00000014, 0x00000014, },
	{ 0x1c5b44, 0x00000015, 0x00000015, 0x00000015, 0x00000015, },
	{ 0x1c5b48, 0x00000018, 0x00000018, 0x00000018, 0x00000018, },
	{ 0x1c5b4c, 0x00000019, 0x00000019, 0x00000019, 0x00000019, },
	{ 0x1c5b50, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
	{ 0x1c5b54, 0x0000001b, 0x0000001b, 0x0000001b, 0x0000001b, },
	{ 0x1c5b58, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, },
	{ 0x1c5b5c, 0x0000001d, 0x0000001d, 0x0000001d, 0x0000001d, },
	{ 0x1c5b60, 0x00000020, 0x00000020, 0x00000020, 0x00000020, },
	{ 0x1c5b64, 0x00000021, 0x00000021, 0x00000021, 0x00000021, },
	{ 0x1c5b68, 0x00000022, 0x00000022, 0x00000022, 0x00000022, },
	{ 0x1c5b6c, 0x00000023, 0x00000023, 0x00000023, 0x00000023, },
	{ 0x1c5b70, 0x00000024, 0x00000024, 0x00000024, 0x00000024, },
	{ 0x1c5b74, 0x00000025, 0x00000025, 0x00000025, 0x00000025, },
	{ 0x1c5b78, 0x00000028, 0x00000028, 0x00000028, 0x00000028, },
	{ 0x1c5b7c, 0x00000029, 0x00000029, 0x00000029, 0x00000029, },
	{ 0x1c5b80, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a, },
	{ 0x1c5b84, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, },
	{ 0x1c5b88, 0x0000002c, 0x0000002c, 0x0000002c, 0x0000002c, },
	{ 0x1c5b8c, 0x0000002d, 0x0000002d, 0x0000002d, 0x0000002d, },
	{ 0x1c5b90, 0x00000030, 0x00000030, 0x00000030, 0x00000030, },
	{ 0x1c5b94, 0x00000031, 0x00000031, 0x00000031, 0x00000031, },
	{ 0x1c5b98, 0x00000032, 0x00000032, 0x00000032, 0x00000032, },
	{ 0x1c5b9c, 0x00000033, 0x00000033, 0x00000033, 0x00000033, },
	{ 0x1c5ba0, 0x00000034, 0x00000034, 0x00000034, 0x00000034, },
	{ 0x1c5ba4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5ba8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bac, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bb0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bb4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bb8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bbc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bc0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bc4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bc8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bcc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bd0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bd4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bd8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bdc, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5be0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5be4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5be8, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bec, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bf0, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bf4, 0x00000035, 0x00000035, 0x00000035, 0x00000035, },
	{ 0x1c5bf8, 0x00000010, 0x00000010, 0x00000010, 0x00000010, },
	{ 0x1c5bfc, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, },
	{ 0x1c5c00, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c0c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c10, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c14, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c18, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c1c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c20, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c24, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c28, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c2c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c30, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c34, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c38, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5c3c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5cf0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5cf4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5cf8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c5cfc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6200, 0x00000008, 0x00000008, 0x0000000e, 0x0000000e, },
	{ 0x1c6204, 0x00000440, 0x00000440, 0x00000440, 0x00000440, },
	{ 0x1c6208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, },
	{ 0x1c620c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
	{ 0x1c6210, 0x40806333, 0x40806333, 0x40806333, 0x40806333, },
	{ 0x1c6214, 0x00106c10, 0x00106c10, 0x00106c10, 0x00106c10, },
	{ 0x1c6218, 0x009c4060, 0x009c4060, 0x009c4060, 0x009c4060, },
	{ 0x1c621c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, },
	{ 0x1c6220, 0x018830c6, 0x018830c6, 0x018830c6, 0x018830c6, },
	{ 0x1c6224, 0x00000400, 0x00000400, 0x00000400, 0x00000400, },
	{ 0x1c6228, 0x000009b5, 0x000009b5, 0x000009b5, 0x000009b5, },
	{ 0x1c622c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6230, 0x00000108, 0x00000210, 0x00000210, 0x00000108, },
	{ 0x1c6234, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c6238, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c623c, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, },
	{ 0x1c6240, 0x38490a20, 0x38490a20, 0x38490a20, 0x38490a20, },
	{ 0x1c6244, 0x00007bb6, 0x00007bb6, 0x00007bb6, 0x00007bb6, },
	{ 0x1c6248, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, 0x0fff3ffc, },
	{ 0x1c624c, 0x00000001, 0x00000001, 0x00000001, 0x00000001, },
	{ 0x1c6250, 0x0000a000, 0x0000a000, 0x0000a000, 0x0000a000, },
	{ 0x1c6254, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6258, 0x0cc75380, 0x0cc75380, 0x0cc75380, 0x0cc75380, },
	{ 0x1c625c, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01, },
	{ 0x1c6260, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, 0xdfa91f01, },
	{ 0x1c6264, 0x00418a11, 0x00418a11, 0x00418a11, 0x00418a11, },
	{ 0x1c6268, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c626c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
	{ 0x1c6274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, },
	{ 0x1c6278, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
	{ 0x1c627c, 0x051701ce, 0x051701ce, 0x051701ce, 0x051701ce, },
	{ 0x1c6300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, },
	{ 0x1c6304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, },
	{ 0x1c6308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, },
	{ 0x1c630c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, },
	{ 0x1c6310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, },
	{ 0x1c6314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, },
	{ 0x1c6318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, },
	{ 0x1c631c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, },
	{ 0x1c6320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, },
	{ 0x1c6324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, },
	{ 0x1c6328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, },
	{ 0x1c632c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6338, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c633c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6340, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6344, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c6348, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
	{ 0x1c634c, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
	{ 0x1c6350, 0x3fffffff, 0x3fffffff, 0x3fffffff, 0x3fffffff, },
	{ 0x1c6354, 0x0003ffff, 0x0003ffff, 0x0003ffff, 0x0003ffff, },
	{ 0x1c6358, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f, },
	{ 0x1c6388, 0x08000000, 0x08000000, 0x08000000, 0x08000000, },
	{ 0x1c638c, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c6390, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c6394, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
	{ 0x1c6398, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce, },
	{ 0x1c639c, 0x00000007, 0x00000007, 0x00000007, 0x00000007, },
	{ 0x1c63a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63a4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63a8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63ac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63b0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63b4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63b8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63bc, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63c0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63c4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63c8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63cc, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c63d0, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c63d4, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, 0x3f3f3f3f, },
	{ 0x1c63d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, },
	{ 0x1c63dc, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, },
	{ 0x1c63e0, 0x000000c0, 0x000000c0, 0x000000c0, 0x000000c0, },
	{ 0x1c6848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
	{ 0x1c6920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
	{ 0x1c6960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
	{ 0x1c720c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
	{ 0x1c726c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
	{ 0x1c7848, 0x00180a65, 0x00180a65, 0x00180a68, 0x00180a68, },
	{ 0x1c7920, 0x0510001c, 0x0510001c, 0x0510001c, 0x0510001c, },
	{ 0x1c7960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, },
	{ 0x1c820c, 0x012e8160, 0x012e8160, 0x012a8160, 0x012a8160, },
	{ 0x1c826c, 0x09249126, 0x09249126, 0x09249126, 0x09249126, },
/*	{ 0x1c8864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, }, */
	{ 0x1c8864, 0x0001c600, 0x0001c600, 0x0001c600, 0x0001c600, },
	{ 0x1c895c, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, 0x004b6a8e, },
	{ 0x1c8968, 0x000003ce, 0x000003ce, 0x000003ce, 0x000003ce, },
	{ 0x1c89bc, 0x00181400, 0x00181400, 0x00181400, 0x00181400, },
	{ 0x1c9270, 0x00820820, 0x00820820, 0x00820820, 0x00820820, },
	{ 0x1c935c, 0x066c420f, 0x066c420f, 0x066c420f, 0x066c420f, },
	{ 0x1c9360, 0x0f282207, 0x0f282207, 0x0f282207, 0x0f282207, },
	{ 0x1c9364, 0x17601685, 0x17601685, 0x17601685, 0x17601685, },
	{ 0x1c9368, 0x1f801104, 0x1f801104, 0x1f801104, 0x1f801104, },
	{ 0x1c936c, 0x37a00c03, 0x37a00c03, 0x37a00c03, 0x37a00c03, },
	{ 0x1c9370, 0x3fc40883, 0x3fc40883, 0x3fc40883, 0x3fc40883, },
	{ 0x1c9374, 0x57c00803, 0x57c00803, 0x57c00803, 0x57c00803, },
	{ 0x1c9378, 0x5fd80682, 0x5fd80682, 0x5fd80682, 0x5fd80682, },
	{ 0x1c937c, 0x7fe00482, 0x7fe00482, 0x7fe00482, 0x7fe00482, },
	{ 0x1c9380, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba, },
	{ 0x1c9384, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, }
};
401
402/*
403 * look up a certain register in ar5416_phy_init[] and return the init. value
404 * for the band and bandwidth given. Return 0 if register address not found.
405 */
406static u32 carl9170_def_val(u32 reg, bool is_2ghz, bool is_40mhz)
407{
408 unsigned int i;
409 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
410 if (ar5416_phy_init[i].reg != reg)
411 continue;
412
413 if (is_2ghz) {
414 if (is_40mhz)
415 return ar5416_phy_init[i]._2ghz_40;
416 else
417 return ar5416_phy_init[i]._2ghz_20;
418 } else {
419 if (is_40mhz)
420 return ar5416_phy_init[i]._5ghz_40;
421 else
422 return ar5416_phy_init[i]._5ghz_20;
423 }
424 }
425 return 0;
426}
427
/*
 * Initialize some PHY registers from EEPROM values in modal_header[]
 * according to the given band and bandwidth. The (index N) comments
 * refer to the modal_header fields being applied.
 * Returns 0 on success or the error of the batched register write.
 */
static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
				bool is_2ghz, bool is_40mhz)
{
	/* maps the 4-bit EEPROM xpdGain code to the PD gain bits below */
	static const u8 xpd2pd[16] = {
		0x2, 0x2, 0x2, 0x1, 0x2, 0x2, 0x6, 0x2,
		0x2, 0x3, 0x7, 0x2, 0xb, 0x2, 0x2, 0x2
	};
	/* pointer to the modal_header acc. to band */
	struct ar9170_eeprom_modal *m = &ar->eeprom.modal_header[is_2ghz];
	u32 val;

	carl9170_regwrite_begin(ar);

	/* ant common control (index 0) */
	carl9170_regwrite(AR9170_PHY_REG_SWITCH_COM,
		le32_to_cpu(m->antCtrlCommon));

	/* ant control chain 0 (index 1) */
	carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_0,
		le32_to_cpu(m->antCtrlChain[0]));

	/* ant control chain 2 (index 2) */
	carl9170_regwrite(AR9170_PHY_REG_SWITCH_CHAIN_2,
		le32_to_cpu(m->antCtrlChain[1]));

	/* SwSettle (index 3) - only patched for 20 MHz operation */
	if (!is_40mhz) {
		val = carl9170_def_val(AR9170_PHY_REG_SETTLING,
				       is_2ghz, is_40mhz);
		SET_VAL(AR9170_PHY_SETTLING_SWITCH, val, m->switchSettling);
		carl9170_regwrite(AR9170_PHY_REG_SETTLING, val);
	}

	/* adcDesired, pdaDesired (index 4) */
	val = carl9170_def_val(AR9170_PHY_REG_DESIRED_SZ, is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_DESIRED_SZ_PGA, val, m->pgaDesiredSize);
	SET_VAL(AR9170_PHY_DESIRED_SZ_ADC, val, m->adcDesiredSize);
	carl9170_regwrite(AR9170_PHY_REG_DESIRED_SZ, val);

	/* TxEndToXpaOff, TxFrameToXpaOn (index 5) */
	val = carl9170_def_val(AR9170_PHY_REG_RF_CTL4, is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF, val, m->txEndToXpaOff);
	SET_VAL(AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF, val, m->txEndToXpaOff);
	SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAB_ON, val, m->txFrameToXpaOn);
	SET_VAL(AR9170_PHY_RF_CTL4_FRAME_XPAA_ON, val, m->txFrameToXpaOn);
	carl9170_regwrite(AR9170_PHY_REG_RF_CTL4, val);

	/* TxEndToRxOn (index 6) */
	val = carl9170_def_val(AR9170_PHY_REG_RF_CTL3, is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON, val, m->txEndToRxOn);
	carl9170_regwrite(AR9170_PHY_REG_RF_CTL3, val);

	/*
	 * thresh62 (index 7) - raw register 0x1c8864 has no named
	 * definition here; thresh62 occupies the 7-bit field at bit 12.
	 */
	val = carl9170_def_val(0x1c8864, is_2ghz, is_40mhz);
	val = (val & ~0x7f000) | (m->thresh62 << 12);
	carl9170_regwrite(0x1c8864, val);

	/* tx/rx attenuation chain 0 (index 8) */
	val = carl9170_def_val(AR9170_PHY_REG_RXGAIN, is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[0]);
	carl9170_regwrite(AR9170_PHY_REG_RXGAIN, val);

	/* tx/rx attenuation chain 2 (index 9) */
	val = carl9170_def_val(AR9170_PHY_REG_RXGAIN_CHAIN_2,
			       is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_RXGAIN_TXRX_ATTEN, val, m->txRxAttenCh[1]);
	carl9170_regwrite(AR9170_PHY_REG_RXGAIN_CHAIN_2, val);

	/* tx/rx margin chain 0 (index 10) */
	val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ, is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[0]);
	/* bsw margin chain 0 for 5GHz only */
	if (!is_2ghz)
		SET_VAL(AR9170_PHY_GAIN_2GHZ_BSW_MARGIN, val, m->bswMargin[0]);
	carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ, val);

	/* tx/rx margin chain 2 (index 11) */
	val = carl9170_def_val(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2,
			       is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN, val, m->rxTxMarginCh[1]);
	carl9170_regwrite(AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2, val);

	/* iqCall, iqCallq chain 0 (index 12) */
	val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(0),
			       is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[0]);
	SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[0]);
	carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(0), val);

	/* iqCall, iqCallq chain 2 (index 13) */
	val = carl9170_def_val(AR9170_PHY_REG_TIMING_CTRL4(2),
			       is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, val, m->iqCalICh[1]);
	SET_VAL(AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, val, m->iqCalQCh[1]);
	carl9170_regwrite(AR9170_PHY_REG_TIMING_CTRL4(2), val);

	/* xpd gain mask (index 14) - low 2 bits and upper bits split up */
	val = carl9170_def_val(AR9170_PHY_REG_TPCRG1, is_2ghz, is_40mhz);
	SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_1, val,
		xpd2pd[m->xpdGain & 0xf] & 3);
	SET_VAL(AR9170_PHY_TPCRG1_PD_GAIN_2, val,
		xpd2pd[m->xpdGain & 0xf] >> 2);
	carl9170_regwrite(AR9170_PHY_REG_TPCRG1, val);

	carl9170_regwrite(AR9170_PHY_REG_RX_CHAINMASK, ar->eeprom.rx_mask);
	carl9170_regwrite(AR9170_PHY_REG_CAL_CHAINMASK, ar->eeprom.rx_mask);

	carl9170_regwrite_finish();
	return carl9170_regwrite_result();
}
542
543static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
544{
545 int i, err;
546 u32 val;
547 bool is_2ghz = band == IEEE80211_BAND_2GHZ;
548 bool is_40mhz = conf_is_ht40(&ar->hw->conf);
549
550 carl9170_regwrite_begin(ar);
551
552 for (i = 0; i < ARRAY_SIZE(ar5416_phy_init); i++) {
553 if (is_40mhz) {
554 if (is_2ghz)
555 val = ar5416_phy_init[i]._2ghz_40;
556 else
557 val = ar5416_phy_init[i]._5ghz_40;
558 } else {
559 if (is_2ghz)
560 val = ar5416_phy_init[i]._2ghz_20;
561 else
562 val = ar5416_phy_init[i]._5ghz_20;
563 }
564
565 carl9170_regwrite(ar5416_phy_init[i].reg, val);
566 }
567
568 carl9170_regwrite_finish();
569 err = carl9170_regwrite_result();
570 if (err)
571 return err;
572
573 err = carl9170_init_phy_from_eeprom(ar, is_2ghz, is_40mhz);
574 if (err)
575 return err;
576
577 err = carl9170_init_power_cal(ar);
578 if (err)
579 return err;
580
581 /* XXX: remove magic! */
582 if (is_2ghz)
583 err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, 0x5163);
584 else
585 err = carl9170_write_reg(ar, AR9170_PWR_REG_PLL_ADDAC, 0x5143);
586
587 return err;
588}
589
/* one RF bank register write: address plus per-band (5/2.4 GHz) values */
struct carl9170_rf_initvals {
	u32 reg, _5ghz, _2ghz;
};
593
594static struct carl9170_rf_initvals carl9170_rf_initval[] = {
595 /* bank 0 */
596 { 0x1c58b0, 0x1e5795e5, 0x1e5795e5},
597 { 0x1c58e0, 0x02008020, 0x02008020},
598 /* bank 1 */
599 { 0x1c58b0, 0x02108421, 0x02108421},
600 { 0x1c58ec, 0x00000008, 0x00000008},
601 /* bank 2 */
602 { 0x1c58b0, 0x0e73ff17, 0x0e73ff17},
603 { 0x1c58e0, 0x00000420, 0x00000420},
604 /* bank 3 */
605 { 0x1c58f0, 0x01400018, 0x01c00018},
606 /* bank 4 */
607 { 0x1c58b0, 0x000001a1, 0x000001a1},
608 { 0x1c58e8, 0x00000001, 0x00000001},
609 /* bank 5 */
610 { 0x1c58b0, 0x00000013, 0x00000013},
611 { 0x1c58e4, 0x00000002, 0x00000002},
612 /* bank 6 */
613 { 0x1c58b0, 0x00000000, 0x00000000},
614 { 0x1c58b0, 0x00000000, 0x00000000},
615 { 0x1c58b0, 0x00000000, 0x00000000},
616 { 0x1c58b0, 0x00000000, 0x00000000},
617 { 0x1c58b0, 0x00000000, 0x00000000},
618 { 0x1c58b0, 0x00004000, 0x00004000},
619 { 0x1c58b0, 0x00006c00, 0x00006c00},
620 { 0x1c58b0, 0x00002c00, 0x00002c00},
621 { 0x1c58b0, 0x00004800, 0x00004800},
622 { 0x1c58b0, 0x00004000, 0x00004000},
623 { 0x1c58b0, 0x00006000, 0x00006000},
624 { 0x1c58b0, 0x00001000, 0x00001000},
625 { 0x1c58b0, 0x00004000, 0x00004000},
626 { 0x1c58b0, 0x00007c00, 0x00007c00},
627 { 0x1c58b0, 0x00007c00, 0x00007c00},
628 { 0x1c58b0, 0x00007c00, 0x00007c00},
629 { 0x1c58b0, 0x00007c00, 0x00007c00},
630 { 0x1c58b0, 0x00007c00, 0x00007c00},
631 { 0x1c58b0, 0x00087c00, 0x00087c00},
632 { 0x1c58b0, 0x00007c00, 0x00007c00},
633 { 0x1c58b0, 0x00005400, 0x00005400},
634 { 0x1c58b0, 0x00000c00, 0x00000c00},
635 { 0x1c58b0, 0x00001800, 0x00001800},
636 { 0x1c58b0, 0x00007c00, 0x00007c00},
637 { 0x1c58b0, 0x00006c00, 0x00006c00},
638 { 0x1c58b0, 0x00006c00, 0x00006c00},
639 { 0x1c58b0, 0x00007c00, 0x00007c00},
640 { 0x1c58b0, 0x00002c00, 0x00002c00},
641 { 0x1c58b0, 0x00003c00, 0x00003c00},
642 { 0x1c58b0, 0x00003800, 0x00003800},
643 { 0x1c58b0, 0x00001c00, 0x00001c00},
644 { 0x1c58b0, 0x00000800, 0x00000800},
645 { 0x1c58b0, 0x00000408, 0x00000408},
646 { 0x1c58b0, 0x00004c15, 0x00004c15},
647 { 0x1c58b0, 0x00004188, 0x00004188},
648 { 0x1c58b0, 0x0000201e, 0x0000201e},
649 { 0x1c58b0, 0x00010408, 0x00010408},
650 { 0x1c58b0, 0x00000801, 0x00000801},
651 { 0x1c58b0, 0x00000c08, 0x00000c08},
652 { 0x1c58b0, 0x0000181e, 0x0000181e},
653 { 0x1c58b0, 0x00001016, 0x00001016},
654 { 0x1c58b0, 0x00002800, 0x00002800},
655 { 0x1c58b0, 0x00004010, 0x00004010},
656 { 0x1c58b0, 0x0000081c, 0x0000081c},
657 { 0x1c58b0, 0x00000115, 0x00000115},
658 { 0x1c58b0, 0x00000015, 0x00000015},
659 { 0x1c58b0, 0x00000066, 0x00000066},
660 { 0x1c58b0, 0x0000001c, 0x0000001c},
661 { 0x1c58b0, 0x00000000, 0x00000000},
662 { 0x1c58b0, 0x00000004, 0x00000004},
663 { 0x1c58b0, 0x00000015, 0x00000015},
664 { 0x1c58b0, 0x0000001f, 0x0000001f},
665 { 0x1c58e0, 0x00000000, 0x00000400},
666 /* bank 7 */
667 { 0x1c58b0, 0x000000a0, 0x000000a0},
668 { 0x1c58b0, 0x00000000, 0x00000000},
669 { 0x1c58b0, 0x00000040, 0x00000040},
670 { 0x1c58f0, 0x0000001c, 0x0000001c},
671};
672
673static int carl9170_init_rf_banks_0_7(struct ar9170 *ar, bool band5ghz)
674{
675 int err, i;
676
677 carl9170_regwrite_begin(ar);
678
679 for (i = 0; i < ARRAY_SIZE(carl9170_rf_initval); i++)
680 carl9170_regwrite(carl9170_rf_initval[i].reg,
681 band5ghz ? carl9170_rf_initval[i]._5ghz
682 : carl9170_rf_initval[i]._2ghz);
683
684 carl9170_regwrite_finish();
685 err = carl9170_regwrite_result();
686 if (err)
687 wiphy_err(ar->hw->wiphy, "rf init failed\n");
688
689 return err;
690}
691
/*
 * One set of frequency-dependent coefficients in mantissa/exponent
 * form. The *_shgi pair presumably covers short guard interval
 * operation - TODO confirm against the firmware interface.
 */
struct carl9170_phy_freq_params {
	u8 coeff_exp;
	u16 coeff_man;
	u8 coeff_exp_shgi;
	u16 coeff_man_shgi;
};
698
/* channel bandwidth: 20 MHz, or 40 MHz with extension channel below/above */
enum carl9170_bw {
	CARL9170_BW_20,
	CARL9170_BW_40_BELOW,
	CARL9170_BW_40_ABOVE,

	/* keep last - number of bandwidth modes */
	__CARL9170_NUM_BW,
};
706
/* per-channel coefficients, one parameter set per carl9170_bw mode */
struct carl9170_phy_freq_entry {
	u16 freq;
	struct carl9170_phy_freq_params params[__CARL9170_NUM_BW];
};
711
/* NB: must be in sync with channel tables in main! */
static const struct carl9170_phy_freq_entry carl9170_phy_freq_params[] = {
/*
 * freq,
 * 20MHz,
 * 40MHz (below),
 * 40MHz (above),
 */
	{ 2412, {
		{ 3, 21737, 3, 19563, },
		{ 3, 21827, 3, 19644, },
		{ 3, 21647, 3, 19482, },
	} },
	{ 2417, {
		{ 3, 21692, 3, 19523, },
		{ 3, 21782, 3, 19604, },
		{ 3, 21602, 3, 19442, },
	} },
	{ 2422, {
		{ 3, 21647, 3, 19482, },
		{ 3, 21737, 3, 19563, },
		{ 3, 21558, 3, 19402, },
	} },
	{ 2427, {
		{ 3, 21602, 3, 19442, },
		{ 3, 21692, 3, 19523, },
		{ 3, 21514, 3, 19362, },
	} },
	{ 2432, {
		{ 3, 21558, 3, 19402, },
		{ 3, 21647, 3, 19482, },
		{ 3, 21470, 3, 19323, },
	} },
	{ 2437, {
		{ 3, 21514, 3, 19362, },
		{ 3, 21602, 3, 19442, },
		{ 3, 21426, 3, 19283, },
	} },
	{ 2442, {
		{ 3, 21470, 3, 19323, },
		{ 3, 21558, 3, 19402, },
		{ 3, 21382, 3, 19244, },
	} },
	{ 2447, {
		{ 3, 21426, 3, 19283, },
		{ 3, 21514, 3, 19362, },
		{ 3, 21339, 3, 19205, },
	} },
	{ 2452, {
		{ 3, 21382, 3, 19244, },
		{ 3, 21470, 3, 19323, },
		{ 3, 21295, 3, 19166, },
	} },
	{ 2457, {
		{ 3, 21339, 3, 19205, },
		{ 3, 21426, 3, 19283, },
		{ 3, 21252, 3, 19127, },
	} },
	{ 2462, {
		{ 3, 21295, 3, 19166, },
		{ 3, 21382, 3, 19244, },
		{ 3, 21209, 3, 19088, },
	} },
	{ 2467, {
		{ 3, 21252, 3, 19127, },
		{ 3, 21339, 3, 19205, },
		{ 3, 21166, 3, 19050, },
	} },
	{ 2472, {
		{ 3, 21209, 3, 19088, },
		{ 3, 21295, 3, 19166, },
		{ 3, 21124, 3, 19011, },
	} },
	{ 2484, {
		{ 3, 21107, 3, 18996, },
		{ 3, 21192, 3, 19073, },
		{ 3, 21022, 3, 18920, },
	} },
	{ 4920, {
		{ 4, 21313, 4, 19181, },
		{ 4, 21356, 4, 19220, },
		{ 4, 21269, 4, 19142, },
	} },
	{ 4940, {
		{ 4, 21226, 4, 19104, },
		{ 4, 21269, 4, 19142, },
		{ 4, 21183, 4, 19065, },
	} },
	{ 4960, {
		{ 4, 21141, 4, 19027, },
		{ 4, 21183, 4, 19065, },
		{ 4, 21098, 4, 18988, },
	} },
	{ 4980, {
		{ 4, 21056, 4, 18950, },
		{ 4, 21098, 4, 18988, },
		{ 4, 21014, 4, 18912, },
	} },
	{ 5040, {
		{ 4, 20805, 4, 18725, },
		{ 4, 20846, 4, 18762, },
		{ 4, 20764, 4, 18687, },
	} },
	{ 5060, {
		{ 4, 20723, 4, 18651, },
		{ 4, 20764, 4, 18687, },
		{ 4, 20682, 4, 18614, },
	} },
	{ 5080, {
		{ 4, 20641, 4, 18577, },
		{ 4, 20682, 4, 18614, },
		{ 4, 20601, 4, 18541, },
	} },
	{ 5180, {
		{ 4, 20243, 4, 18219, },
		{ 4, 20282, 4, 18254, },
		{ 4, 20204, 4, 18183, },
	} },
	{ 5200, {
		{ 4, 20165, 4, 18148, },
		{ 4, 20204, 4, 18183, },
		{ 4, 20126, 4, 18114, },
	} },
	{ 5220, {
		{ 4, 20088, 4, 18079, },
		{ 4, 20126, 4, 18114, },
		{ 4, 20049, 4, 18044, },
	} },
	{ 5240, {
		{ 4, 20011, 4, 18010, },
		{ 4, 20049, 4, 18044, },
		{ 4, 19973, 4, 17976, },
	} },
	{ 5260, {
		{ 4, 19935, 4, 17941, },
		{ 4, 19973, 4, 17976, },
		{ 4, 19897, 4, 17907, },
	} },
	{ 5280, {
		{ 4, 19859, 4, 17873, },
		{ 4, 19897, 4, 17907, },
		{ 4, 19822, 4, 17840, },
	} },
	{ 5300, {
		{ 4, 19784, 4, 17806, },
		{ 4, 19822, 4, 17840, },
		{ 4, 19747, 4, 17772, },
	} },
	{ 5320, {
		{ 4, 19710, 4, 17739, },
		{ 4, 19747, 4, 17772, },
		{ 4, 19673, 4, 17706, },
	} },
	{ 5500, {
		{ 4, 19065, 4, 17159, },
		{ 4, 19100, 4, 17190, },
		{ 4, 19030, 4, 17127, },
	} },
	{ 5520, {
		{ 4, 18996, 4, 17096, },
		{ 4, 19030, 4, 17127, },
		{ 4, 18962, 4, 17065, },
	} },
	{ 5540, {
		{ 4, 18927, 4, 17035, },
		{ 4, 18962, 4, 17065, },
		{ 4, 18893, 4, 17004, },
	} },
	{ 5560, {
		{ 4, 18859, 4, 16973, },
		{ 4, 18893, 4, 17004, },
		{ 4, 18825, 4, 16943, },
	} },
	{ 5580, {
		{ 4, 18792, 4, 16913, },
		{ 4, 18825, 4, 16943, },
		{ 4, 18758, 4, 16882, },
	} },
	{ 5600, {
		{ 4, 18725, 4, 16852, },
		{ 4, 18758, 4, 16882, },
		{ 4, 18691, 4, 16822, },
	} },
	{ 5620, {
		{ 4, 18658, 4, 16792, },
		{ 4, 18691, 4, 16822, },
		{ 4, 18625, 4, 16762, },
	} },
	{ 5640, {
		{ 4, 18592, 4, 16733, },
		{ 4, 18625, 4, 16762, },
		{ 4, 18559, 4, 16703, },
	} },
	{ 5660, {
		{ 4, 18526, 4, 16673, },
		{ 4, 18559, 4, 16703, },
		{ 4, 18493, 4, 16644, },
	} },
	{ 5680, {
		{ 4, 18461, 4, 16615, },
		{ 4, 18493, 4, 16644, },
		{ 4, 18428, 4, 16586, },
	} },
	{ 5700, {
		{ 4, 18396, 4, 16556, },
		{ 4, 18428, 4, 16586, },
		{ 4, 18364, 4, 16527, },
	} },
	{ 5745, {
		{ 4, 18252, 4, 16427, },
		{ 4, 18284, 4, 16455, },
		{ 4, 18220, 4, 16398, },
	} },
	{ 5765, {
		{ 4, 18189, 5, 32740, },
		{ 4, 18220, 4, 16398, },
		{ 4, 18157, 5, 32683, },
	} },
	{ 5785, {
		{ 4, 18126, 5, 32626, },
		{ 4, 18157, 5, 32683, },
		{ 4, 18094, 5, 32570, },
	} },
	{ 5805, {
		{ 4, 18063, 5, 32514, },
		{ 4, 18094, 5, 32570, },
		{ 4, 18032, 5, 32458, },
	} },
	{ 5825, {
		{ 4, 18001, 5, 32402, },
		{ 4, 18032, 5, 32458, },
		{ 4, 17970, 5, 32347, },
	} },
	{ 5170, {
		{ 4, 20282, 4, 18254, },
		{ 4, 20321, 4, 18289, },
		{ 4, 20243, 4, 18219, },
	} },
	{ 5190, {
		{ 4, 20204, 4, 18183, },
		{ 4, 20243, 4, 18219, },
		{ 4, 20165, 4, 18148, },
	} },
	{ 5210, {
		{ 4, 20126, 4, 18114, },
		{ 4, 20165, 4, 18148, },
		{ 4, 20088, 4, 18079, },
	} },
	{ 5230, {
		{ 4, 20049, 4, 18044, },
		{ 4, 20088, 4, 18079, },
		{ 4, 20011, 4, 18010, },
	} },
};
966
/*
 * Program RF bank 4: tune the synthesizer to the given frequency.
 *
 * The channel selector and reference bits are packed into two words
 * (fd0/fd1) and written to the raw bank-4 registers 0x1c58b0 and
 * 0x1c58e8. chansel is run through byte_rev_table, i.e. the hardware
 * expects it bit-reversed.
 */
static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
				      u32 freq, enum carl9170_bw bw)
{
	int err;
	u32 d0, d1, td0, td1, fd0, fd1;
	u8 chansel;
	u8 refsel0 = 1, refsel1 = 0;
	u8 lf_synth = 0;

	/* move to the center of the full 40 MHz channel, if used */
	switch (bw) {
	case CARL9170_BW_40_ABOVE:
		freq += 10;
		break;
	case CARL9170_BW_40_BELOW:
		freq -= 10;
		break;
	case CARL9170_BW_20:
		break;
	default:
		BUG();
		return -ENOSYS;
	}

	if (band5ghz) {
		/* 5 MHz steps use the default reference; 10 MHz flip it */
		if (freq % 10) {
			chansel = (freq - 4800) / 5;
		} else {
			chansel = ((freq - 4800) / 10) * 2;
			refsel0 = 0;
			refsel1 = 1;
		}
		chansel = byte_rev_table[chansel];
	} else {
		/* channel 14 (2484 MHz) needs the low-frequency synth */
		if (freq == 2484) {
			chansel = 10 + (freq - 2274) / 5;
			lf_synth = 1;
		} else
			chansel = 16 + (freq - 2272) / 5;
		chansel *= 4;
		chansel = byte_rev_table[chansel];
	}

	d1 = chansel;
	d0 = 0x21 |
	     refsel0 << 3 |
	     refsel1 << 2 |
	     lf_synth << 1;
	/* low 5 bits of d0/d1 go into fd0, the next 3 bits into fd1 */
	td0 = d0 & 0x1f;
	td1 = d1 & 0x1f;
	fd0 = td1 << 5 | td0;

	td0 = (d0 >> 5) & 0x7;
	td1 = (d1 >> 5) & 0x7;
	fd1 = td1 << 5 | td0;

	carl9170_regwrite_begin(ar);

	carl9170_regwrite(0x1c58b0, fd0);
	carl9170_regwrite(0x1c58e8, fd1);

	carl9170_regwrite_finish();
	err = carl9170_regwrite_result();
	if (err)
		return err;

	/* presumably gives the synthesizer time to settle - not confirmed */
	msleep(20);

	return 0;
}
1036
/*
 * Return the PHY timing parameters for the given channel/bandwidth
 * from carl9170_phy_freq_params[]. A NULL channel falls back to the
 * table's first entry (2412 MHz).
 */
static const struct carl9170_phy_freq_params *
carl9170_get_hw_dyn_params(struct ieee80211_channel *channel,
			   enum carl9170_bw bw)
{
	unsigned int chanidx = 0;
	u16 freq = 2412;

	if (channel) {
		/* hw_value indexes directly into the parameter table */
		chanidx = channel->hw_value;
		freq = channel->center_freq;
	}

	BUG_ON(chanidx >= ARRAY_SIZE(carl9170_phy_freq_params));

	BUILD_BUG_ON(__CARL9170_NUM_BW != 3);

	/* the table must stay in sync with the channel list in main */
	WARN_ON(carl9170_phy_freq_params[chanidx].freq != freq);

	return &carl9170_phy_freq_params[chanidx].params[bw];
}
1057
/*
 * Find the index of the highest frequency pier that does not exceed
 * f, clamped so that freqs[idx + 1] is always a valid sample for the
 * subsequent interpolation. Returns 0 when f is below every pier.
 */
static int carl9170_find_freq_idx(int nfreqs, u8 *freqs, u8 f)
{
	int i;

	for (i = nfreqs - 2; i >= 0; i--) {
		if (f >= freqs[i])
			return i;
	}

	return 0;
}
1070
/*
 * Linear interpolation between (x1, y1) and (x2, y2) at position x.
 * Degenerate inputs (horizontal segment, x on the left edge, or
 * x1 == x2) all resolve to y1; x on the right edge yields y2.
 */
static s32 carl9170_interpolate_s32(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
	if (y1 == y2 || x == x1 || x1 == x2)
		return y1;

	if (x == x2)
		return y2;

	return y1 + (((y2 - y1) * (x - x1)) / (x2 - x1));
}
1089
/*
 * Interpolate between two u8 sample points, using 24.8 fixed point
 * internally so the fractional part survives the division.
 */
static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
{
#define SHIFT 8
	s32 y;

	y = carl9170_interpolate_s32(x << SHIFT, x1 << SHIFT,
		y1 << SHIFT, x2 << SHIFT, y2 << SHIFT);

	/*
	 * Round to nearest: keep the integer part and add one when the
	 * top fractional bit (i.e. >= 0.5) is set. Note this is NOT
	 * DIV_ROUND_UP(y, 1 << SHIFT) - that would always round up.
	 */
	return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
#undef SHIFT
}
1106
/*
 * Piecewise-linear lookup over four segments: pick the first segment
 * whose upper bound covers x (clamping to the last segment) and
 * interpolate within it.
 */
static u8 carl9170_interpolate_val(u8 x, u8 *x_array, u8 *y_array)
{
	int i = 0;

	while (i < 3 && x > x_array[i + 1])
		i++;

	return carl9170_interpolate_u8(x, x_array[i], y_array[i],
		x_array[i + 1], y_array[i + 1]);
}
1119
1120static int carl9170_set_freq_cal_data(struct ar9170 *ar,
1121 struct ieee80211_channel *channel)
1122{
1123 u8 *cal_freq_pier;
1124 u8 vpds[2][AR5416_PD_GAIN_ICEPTS];
1125 u8 pwrs[2][AR5416_PD_GAIN_ICEPTS];
1126 int chain, idx, i;
1127 u32 phy_data = 0;
1128 u8 f, tmp;
1129
1130 switch (channel->band) {
1131 case IEEE80211_BAND_2GHZ:
1132 f = channel->center_freq - 2300;
1133 cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
1134 i = AR5416_NUM_2G_CAL_PIERS - 1;
1135 break;
1136
1137 case IEEE80211_BAND_5GHZ:
1138 f = (channel->center_freq - 4800) / 5;
1139 cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
1140 i = AR5416_NUM_5G_CAL_PIERS - 1;
1141 break;
1142
1143 default:
1144 return -EINVAL;
1145 break;
1146 }
1147
1148 for (; i >= 0; i--) {
1149 if (cal_freq_pier[i] != 0xff)
1150 break;
1151 }
1152 if (i < 0)
1153 return -EINVAL;
1154
1155 idx = carl9170_find_freq_idx(i, cal_freq_pier, f);
1156
1157 carl9170_regwrite_begin(ar);
1158
1159 for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
1160 for (i = 0; i < AR5416_PD_GAIN_ICEPTS; i++) {
1161 struct ar9170_calibration_data_per_freq *cal_pier_data;
1162 int j;
1163
1164 switch (channel->band) {
1165 case IEEE80211_BAND_2GHZ:
1166 cal_pier_data = &ar->eeprom.
1167 cal_pier_data_2G[chain][idx];
1168 break;
1169
1170 case IEEE80211_BAND_5GHZ:
1171 cal_pier_data = &ar->eeprom.
1172 cal_pier_data_5G[chain][idx];
1173 break;
1174
1175 default:
1176 return -EINVAL;
1177 }
1178
1179 for (j = 0; j < 2; j++) {
1180 vpds[j][i] = carl9170_interpolate_u8(f,
1181 cal_freq_pier[idx],
1182 cal_pier_data->vpd_pdg[j][i],
1183 cal_freq_pier[idx + 1],
1184 cal_pier_data[1].vpd_pdg[j][i]);
1185
1186 pwrs[j][i] = carl9170_interpolate_u8(f,
1187 cal_freq_pier[idx],
1188 cal_pier_data->pwr_pdg[j][i],
1189 cal_freq_pier[idx + 1],
1190 cal_pier_data[1].pwr_pdg[j][i]) / 2;
1191 }
1192 }
1193
1194 for (i = 0; i < 76; i++) {
1195 if (i < 25) {
1196 tmp = carl9170_interpolate_val(i, &pwrs[0][0],
1197 &vpds[0][0]);
1198 } else {
1199 tmp = carl9170_interpolate_val(i - 12,
1200 &pwrs[1][0],
1201 &vpds[1][0]);
1202 }
1203
1204 phy_data |= tmp << ((i & 3) << 3);
1205 if ((i & 3) == 3) {
1206 carl9170_regwrite(0x1c6280 + chain * 0x1000 +
1207 (i & ~3), phy_data);
1208 phy_data = 0;
1209 }
1210 }
1211
1212 for (i = 19; i < 32; i++)
1213 carl9170_regwrite(0x1c6280 + chain * 0x1000 + (i << 2),
1214 0x0);
1215 }
1216
1217 carl9170_regwrite_finish();
1218 return carl9170_regwrite_result();
1219}
1220
/*
 * Look up the maximum allowed edge power for the given frequency in
 * a CTL band-edge list. Falls back to AR5416_MAX_RATE_POWER when no
 * edge applies to this frequency.
 */
static u8 carl9170_get_max_edge_power(struct ar9170 *ar,
	u32 freq, struct ar9170_calctl_edges edges[])
{
	int i;
	u8 rc = AR5416_MAX_RATE_POWER;
	u8 f;
	/* convert MHz to the EEPROM channel numbering */
	if (freq < 3000)
		f = freq - 2300;
	else
		f = (freq - 4800) / 5;

	for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
		/* 0xff marks the end of the valid edges */
		if (edges[i].channel == 0xff)
			break;
		if (f == edges[i].channel) {
			/* exact freq match */
			rc = edges[i].power_flags & ~AR9170_CALCTL_EDGE_FLAGS;
			break;
		}
		if (i > 0 && f < edges[i].channel) {
			if (f > edges[i - 1].channel &&
			    edges[i - 1].power_flags &
			    AR9170_CALCTL_EDGE_FLAGS) {
				/* lower channel has the inband flag set */
				rc = edges[i - 1].power_flags &
					~AR9170_CALCTL_EDGE_FLAGS;
			}
			break;
		}
	}

	/* f lies above the last edge: reuse its power if flagged inband */
	if (i == AR5416_NUM_BAND_EDGES) {
		if (f > edges[i - 1].channel &&
		    edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
			/* lower channel has the inband flag set */
			rc = edges[i - 1].power_flags &
				~AR9170_CALCTL_EDGE_FLAGS;
		}
	}
	return rc;
}
1262
1263static u8 carl9170_get_heavy_clip(struct ar9170 *ar, u32 freq,
1264 enum carl9170_bw bw, struct ar9170_calctl_edges edges[])
1265{
1266 u8 f;
1267 int i;
1268 u8 rc = 0;
1269
1270 if (freq < 3000)
1271 f = freq - 2300;
1272 else
1273 f = (freq - 4800) / 5;
1274
1275 if (bw == CARL9170_BW_40_BELOW || bw == CARL9170_BW_40_ABOVE)
1276 rc |= 0xf0;
1277
1278 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1279 if (edges[i].channel == 0xff)
1280 break;
1281 if (f == edges[i].channel) {
1282 if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
1283 rc |= 0x0f;
1284 break;
1285 }
1286 }
1287
1288 return rc;
1289}
1290
/*
 * calculate the conformance test limits and the heavy clip parameter
 * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
 */
static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
{
	u8 ctl_grp; /* CTL group */
	u8 ctl_idx; /* CTL index */
	int i, j;
	struct ctl_modes {
		u8 ctl_mode;
		u8 max_power;
		u8 *pwr_cal_data;
		int pwr_cal_len;
	} *modes;

	/*
	 * order is relevant in the mode_list_*: we fall back to the
	 * lower indices if any mode is missed in the EEPROM.
	 */
	struct ctl_modes mode_list_2ghz[] = {
		{ CTL_11B, 0, ar->power_2G_cck, 4 },
		{ CTL_11G, 0, ar->power_2G_ofdm, 4 },
		{ CTL_2GHT20, 0, ar->power_2G_ht20, 8 },
		{ CTL_2GHT40, 0, ar->power_2G_ht40, 8 },
	};
	struct ctl_modes mode_list_5ghz[] = {
		{ CTL_11A, 0, ar->power_5G_leg, 4 },
		{ CTL_5GHT20, 0, ar->power_5G_ht20, 8 },
		{ CTL_5GHT40, 0, ar->power_5G_ht40, 8 },
	};
	int nr_modes;

#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])

	ar->heavy_clip = 0;

	/*
	 * TODO: investigate the differences between OTUS'
	 * hpreg.c::zfHpGetRegulatoryDomain() and
	 * ath/regd.c::ath_regd_get_band_ctl() -
	 * e.g. for FCC3_WORLD the OTUS procedure
	 * always returns CTL_FCC, while the one in ath/ delivers
	 * CTL_ETSI for 2GHz and CTL_FCC for 5GHz.
	 */
	ctl_grp = ath_regd_get_band_ctl(&ar->common.regulatory,
					ar->hw->conf.channel->band);

	/* ctl group not found - either invalid band (NO_CTL) or ww roaming */
	if (ctl_grp == NO_CTL || ctl_grp == SD_NO_CTL)
		ctl_grp = CTL_FCC;

	if (ctl_grp != CTL_FCC)
		/* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
		return;

	if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
		modes = mode_list_2ghz;
		nr_modes = ARRAY_SIZE(mode_list_2ghz);
	} else {
		modes = mode_list_5ghz;
		nr_modes = ARRAY_SIZE(mode_list_5ghz);
	}

	for (i = 0; i < nr_modes; i++) {
		u8 c = ctl_grp | modes[i].ctl_mode;
		/* find the matching CTL entry in the EEPROM index */
		for (ctl_idx = 0; ctl_idx < AR5416_NUM_CTLS; ctl_idx++)
			if (c == ar->eeprom.ctl_index[ctl_idx])
				break;
		if (ctl_idx < AR5416_NUM_CTLS) {
			int f_off = 0;

			/*
			 * determine heavy clip parameter
			 * from the 11G edges array
			 */
			if (modes[i].ctl_mode == CTL_11G) {
				ar->heavy_clip =
					carl9170_get_heavy_clip(ar,
						freq, bw, EDGES(ctl_idx, 1));
			}

			/* adjust freq for 40MHz */
			if (modes[i].ctl_mode == CTL_2GHT40 ||
			    modes[i].ctl_mode == CTL_5GHT40) {
				if (bw == CARL9170_BW_40_BELOW)
					f_off = -10;
				else
					f_off = 10;
			}

			modes[i].max_power =
				carl9170_get_max_edge_power(ar,
					freq+f_off, EDGES(ctl_idx, 1));

			/*
			 * TODO: check if the regulatory max. power is
			 * controlled by cfg80211 for DFS.
			 * (hpmain applies it to max_power itself for DFS freq)
			 */

		} else {
			/*
			 * Workaround in otus driver, hpmain.c, line 3906:
			 * if no data for 5GHT20 are found, take the
			 * legacy 5G value. We extend this here to fallback
			 * from any other HT* or 11G, too.
			 */
			int k = i;

			modes[i].max_power = AR5416_MAX_RATE_POWER;
			while (k-- > 0) {
				if (modes[k].max_power !=
				    AR5416_MAX_RATE_POWER) {
					modes[i].max_power = modes[k].max_power;
					break;
				}
			}
		}

		/* apply max power to pwr_cal_data (ar->power_*) */
		for (j = 0; j < modes[i].pwr_cal_len; j++) {
			modes[i].pwr_cal_data[j] = min(modes[i].pwr_cal_data[j],
						       modes[i].max_power);
		}
	}

	/*
	 * NOTE(review): the heavy-clip adjustment decrements the HT40
	 * powers but INCREMENTS the HT20 powers - this asymmetry looks
	 * intentional (inherited from the otus HAL) but should be
	 * verified against the vendor code.
	 */
	if (ar->heavy_clip & 0xf0) {
		ar->power_2G_ht40[0]--;
		ar->power_2G_ht40[1]--;
		ar->power_2G_ht40[2]--;
	}
	if (ar->heavy_clip & 0xf) {
		ar->power_2G_ht20[0]++;
		ar->power_2G_ht20[1]++;
		ar->power_2G_ht20[2]++;
	}

#undef EDGES
}
1431
/*
 * Interpolate the EEPROM target powers for every rate group to the
 * current frequency (filling ar->power_*), apply the conformance
 * test limits and program the ACK/CTS/CF-End TX power registers.
 * Returns 0 on success or the batched register write result.
 */
static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
				  enum carl9170_bw bw)
{
	struct ar9170_calibration_target_power_legacy *ctpl;
	struct ar9170_calibration_target_power_ht *ctph;
	u8 *ctpres;
	int ntargets;
	int idx, i, n;
	u8 ackpower, ackchains, f;
	u8 pwr_freqs[AR5416_MAX_NUM_TGT_PWRS];

	/* convert MHz to the EEPROM channel numbering */
	if (freq < 3000)
		f = freq - 2300;
	else
		f = (freq - 4800)/5;

	/*
	 * cycle through the various modes
	 *
	 * legacy modes first: 5G, 2G CCK, 2G OFDM
	 */
	for (i = 0; i < 3; i++) {
		switch (i) {
		case 0: /* 5 GHz legacy */
			ctpl = &ar->eeprom.cal_tgt_pwr_5G[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_leg;
			break;
		case 1: /* 2.4 GHz CCK */
			ctpl = &ar->eeprom.cal_tgt_pwr_2G_cck[0];
			ntargets = AR5416_NUM_2G_CCK_TARGET_PWRS;
			ctpres = ar->power_2G_cck;
			break;
		case 2: /* 2.4 GHz OFDM */
			ctpl = &ar->eeprom.cal_tgt_pwr_2G_ofdm[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ofdm;
			break;
		default:
			BUG();
		}

		/* collect the valid (!= 0xff) target frequencies */
		for (n = 0; n < ntargets; n++) {
			if (ctpl[n].freq == 0xff)
				break;
			pwr_freqs[n] = ctpl[n].freq;
		}
		ntargets = n;
		idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
		for (n = 0; n < 4; n++)
			ctpres[n] = carl9170_interpolate_u8(f,
				ctpl[idx + 0].freq, ctpl[idx + 0].power[n],
				ctpl[idx + 1].freq, ctpl[idx + 1].power[n]);
	}

	/* HT modes now: 5G HT20, 5G HT40, 2G HT20, 2G HT40 */
	for (i = 0; i < 4; i++) {
		switch (i) {
		case 0: /* 5 GHz HT 20 */
			ctph = &ar->eeprom.cal_tgt_pwr_5G_ht20[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_ht20;
			break;
		case 1: /* 5 GHz HT 40 */
			ctph = &ar->eeprom.cal_tgt_pwr_5G_ht40[0];
			ntargets = AR5416_NUM_5G_TARGET_PWRS;
			ctpres = ar->power_5G_ht40;
			break;
		case 2: /* 2.4 GHz HT 20 */
			ctph = &ar->eeprom.cal_tgt_pwr_2G_ht20[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ht20;
			break;
		case 3: /* 2.4 GHz HT 40 */
			ctph = &ar->eeprom.cal_tgt_pwr_2G_ht40[0];
			ntargets = AR5416_NUM_2G_OFDM_TARGET_PWRS;
			ctpres = ar->power_2G_ht40;
			break;
		default:
			BUG();
		}

		/* collect the valid (!= 0xff) target frequencies */
		for (n = 0; n < ntargets; n++) {
			if (ctph[n].freq == 0xff)
				break;
			pwr_freqs[n] = ctph[n].freq;
		}
		ntargets = n;
		idx = carl9170_find_freq_idx(ntargets, pwr_freqs, f);
		for (n = 0; n < 8; n++)
			ctpres[n] = carl9170_interpolate_u8(f,
				ctph[idx + 0].freq, ctph[idx + 0].power[n],
				ctph[idx + 1].freq, ctph[idx + 1].power[n]);
	}

	/* calc. conformance test limits and apply to ar->power*[] */
	carl9170_calc_ctl(ar, freq, bw);

	/* set ACK/CTS TX power */
	carl9170_regwrite_begin(ar);

	if (ar->eeprom.tx_mask != 1)
		ackchains = AR9170_TX_PHY_TXCHAIN_2;
	else
		ackchains = AR9170_TX_PHY_TXCHAIN_1;

	if (freq < 3000)
		ackpower = ar->power_2G_ofdm[0] & 0x3f;
	else
		ackpower = ar->power_5G_leg[0] & 0x3f;

	carl9170_regwrite(AR9170_MAC_REG_ACK_TPC,
			  0x3c1e | ackpower << 20 | ackchains << 26);
	carl9170_regwrite(AR9170_MAC_REG_RTS_CTS_TPC,
			  ackpower << 5 | ackchains << 11 |
			  ackpower << 21 | ackchains << 27);

	carl9170_regwrite(AR9170_MAC_REG_CFEND_QOSNULL_TPC,
			  ackpower << 5 | ackchains << 11 |
			  ackpower << 21 | ackchains << 27);

	carl9170_regwrite_finish();
	return carl9170_regwrite_result();
}
1556
1557/* TODO: replace this with sign_extend32(noise, 8) */
1558static int carl9170_calc_noise_dbm(u32 raw_noise)
1559{
1560 if (raw_noise & 0x100)
1561 return ~0x1ff | raw_noise;
1562 else
1563 return raw_noise;
1564}
1565
1566int carl9170_get_noisefloor(struct ar9170 *ar)
1567{
1568 static const u32 phy_regs[] = {
1569 AR9170_PHY_REG_CCA, AR9170_PHY_REG_CH2_CCA,
1570 AR9170_PHY_REG_EXT_CCA, AR9170_PHY_REG_CH2_EXT_CCA };
1571 u32 phy_res[ARRAY_SIZE(phy_regs)];
1572 int err, i;
1573
1574 BUILD_BUG_ON(ARRAY_SIZE(phy_regs) != ARRAY_SIZE(ar->noise));
1575
1576 err = carl9170_read_mreg(ar, ARRAY_SIZE(phy_regs), phy_regs, phy_res);
1577 if (err)
1578 return err;
1579
1580 for (i = 0; i < 2; i++) {
1581 ar->noise[i] = carl9170_calc_noise_dbm(
1582 (phy_res[i] >> 19) & 0x1ff);
1583
1584 ar->noise[i + 2] = carl9170_calc_noise_dbm(
1585 (phy_res[i + 2] >> 23) & 0x1ff);
1586 }
1587
1588 return 0;
1589}
1590
1591static enum carl9170_bw nl80211_to_carl(enum nl80211_channel_type type)
1592{
1593 switch (type) {
1594 case NL80211_CHAN_NO_HT:
1595 case NL80211_CHAN_HT20:
1596 return CARL9170_BW_20;
1597 case NL80211_CHAN_HT40MINUS:
1598 return CARL9170_BW_40_BELOW;
1599 case NL80211_CHAN_HT40PLUS:
1600 return CARL9170_BW_40_ABOVE;
1601 default:
1602 BUG();
1603 }
1604}
1605
1606int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1607 enum nl80211_channel_type _bw,
1608 enum carl9170_rf_init_mode rfi)
1609{
1610 const struct carl9170_phy_freq_params *freqpar;
1611 struct carl9170_rf_init_result rf_res;
1612 struct carl9170_rf_init rf;
1613 u32 cmd, tmp, offs = 0, new_ht = 0;
1614 int err;
1615 enum carl9170_bw bw;
1616 bool warm_reset;
1617 struct ieee80211_channel *old_channel = NULL;
1618
1619 bw = nl80211_to_carl(_bw);
1620
1621 if (conf_is_ht(&ar->hw->conf))
1622 new_ht |= CARL9170FW_PHY_HT_ENABLE;
1623
1624 if (conf_is_ht40(&ar->hw->conf))
1625 new_ht |= CARL9170FW_PHY_HT_DYN2040;
1626
1627 /* may be NULL at first setup */
1628 if (ar->channel) {
1629 old_channel = ar->channel;
1630 warm_reset = (old_channel->band != channel->band) ||
1631 (old_channel->center_freq ==
1632 channel->center_freq) ||
1633 (ar->ht_settings != new_ht);
1634
1635 ar->channel = NULL;
1636 } else {
1637 warm_reset = true;
1638 }
1639
1640 /* HW workaround */
1641 if (!ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] &&
1642 channel->center_freq <= 2417)
1643 warm_reset = true;
1644
1645 if (rfi != CARL9170_RFI_NONE || warm_reset) {
1646 u32 val;
1647
1648 if (rfi == CARL9170_RFI_COLD)
1649 val = AR9170_PWR_RESET_BB_COLD_RESET;
1650 else
1651 val = AR9170_PWR_RESET_BB_WARM_RESET;
1652
1653 /* warm/cold reset BB/ADDA */
1654 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, val);
1655 if (err)
1656 return err;
1657
1658 err = carl9170_write_reg(ar, AR9170_PWR_REG_RESET, 0x0);
1659 if (err)
1660 return err;
1661
1662 err = carl9170_init_phy(ar, channel->band);
1663 if (err)
1664 return err;
1665
1666 err = carl9170_init_rf_banks_0_7(ar,
1667 channel->band == IEEE80211_BAND_5GHZ);
1668 if (err)
1669 return err;
1670
1671 cmd = CARL9170_CMD_RF_INIT;
1672
1673 msleep(100);
1674
1675 err = carl9170_echo_test(ar, 0xaabbccdd);
1676 if (err)
1677 return err;
1678 } else {
1679 cmd = CARL9170_CMD_FREQUENCY;
1680 }
1681
1682 err = carl9170_exec_cmd(ar, CARL9170_CMD_FREQ_START, 0, NULL, 0, NULL);
1683 if (err)
1684 return err;
1685
1686 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
1687 0x200);
1688
1689 err = carl9170_init_rf_bank4_pwr(ar,
1690 channel->band == IEEE80211_BAND_5GHZ,
1691 channel->center_freq, bw);
1692 if (err)
1693 return err;
1694
1695 tmp = AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 |
1696 AR9170_PHY_TURBO_FC_HT_EN;
1697
1698 switch (bw) {
1699 case CARL9170_BW_20:
1700 break;
1701 case CARL9170_BW_40_BELOW:
1702 tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
1703 AR9170_PHY_TURBO_FC_SHORT_GI_40;
1704 offs = 3;
1705 break;
1706 case CARL9170_BW_40_ABOVE:
1707 tmp |= AR9170_PHY_TURBO_FC_DYN2040_EN |
1708 AR9170_PHY_TURBO_FC_SHORT_GI_40 |
1709 AR9170_PHY_TURBO_FC_DYN2040_PRI_CH;
1710 offs = 1;
1711 break;
1712 default:
1713 BUG();
1714 return -ENOSYS;
1715 }
1716
1717 if (ar->eeprom.tx_mask != 1)
1718 tmp |= AR9170_PHY_TURBO_FC_WALSH;
1719
1720 err = carl9170_write_reg(ar, AR9170_PHY_REG_TURBO, tmp);
1721 if (err)
1722 return err;
1723
1724 err = carl9170_set_freq_cal_data(ar, channel);
1725 if (err)
1726 return err;
1727
1728 err = carl9170_set_power_cal(ar, channel->center_freq, bw);
1729 if (err)
1730 return err;
1731
1732 freqpar = carl9170_get_hw_dyn_params(channel, bw);
1733
1734 rf.ht_settings = new_ht;
1735 if (conf_is_ht40(&ar->hw->conf))
1736 SET_VAL(CARL9170FW_PHY_HT_EXT_CHAN_OFF, rf.ht_settings, offs);
1737
1738 rf.freq = cpu_to_le32(channel->center_freq * 1000);
1739 rf.delta_slope_coeff_exp = cpu_to_le32(freqpar->coeff_exp);
1740 rf.delta_slope_coeff_man = cpu_to_le32(freqpar->coeff_man);
1741 rf.delta_slope_coeff_exp_shgi = cpu_to_le32(freqpar->coeff_exp_shgi);
1742 rf.delta_slope_coeff_man_shgi = cpu_to_le32(freqpar->coeff_man_shgi);
1743
1744 if (rfi != CARL9170_RFI_NONE)
1745 rf.finiteLoopCount = cpu_to_le32(2000);
1746 else
1747 rf.finiteLoopCount = cpu_to_le32(1000);
1748
1749 err = carl9170_exec_cmd(ar, cmd, sizeof(rf), &rf,
1750 sizeof(rf_res), &rf_res);
1751 if (err)
1752 return err;
1753
1754 err = le32_to_cpu(rf_res.ret);
1755 if (err != 0) {
1756 ar->chan_fail++;
1757 ar->total_chan_fail++;
1758
1759 wiphy_err(ar->hw->wiphy, "channel change: %d -> %d "
1760 "failed (%d).\n", old_channel ?
1761 old_channel->center_freq : -1, channel->center_freq,
1762 err);
1763
1764 if ((rfi == CARL9170_RFI_COLD) || (ar->chan_fail > 3)) {
1765 /*
1766 * We have tried very hard to change to _another_
1767 * channel and we've failed to do so!
1768 * Chances are that the PHY/RF is no longer
1769 * operable (due to corruptions/fatal events/bugs?)
1770 * and we need to reset at a higher level.
1771 */
1772 carl9170_restart(ar, CARL9170_RR_TOO_MANY_PHY_ERRORS);
1773 return 0;
1774 }
1775
1776 err = carl9170_set_channel(ar, channel, _bw,
1777 CARL9170_RFI_COLD);
1778 if (err)
1779 return err;
1780 } else {
1781 ar->chan_fail = 0;
1782 }
1783
1784 err = carl9170_get_noisefloor(ar);
1785 if (err)
1786 return err;
1787
1788 if (ar->heavy_clip) {
1789 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
1790 0x200 | ar->heavy_clip);
1791 if (err) {
1792 if (net_ratelimit()) {
1793 wiphy_err(ar->hw->wiphy, "failed to set "
1794 "heavy clip\n");
1795 }
1796
1797 return err;
1798 }
1799 }
1800
1801 /* FIXME: PSM does not work in 5GHz Band */
1802 if (channel->band == IEEE80211_BAND_5GHZ)
1803 ar->ps.off_override |= PS_OFF_5GHZ;
1804 else
1805 ar->ps.off_override &= ~PS_OFF_5GHZ;
1806
1807 ar->channel = channel;
1808 ar->ht_settings = new_ht;
1809 return 0;
1810}
diff --git a/drivers/net/wireless/ath/carl9170/phy.h b/drivers/net/wireless/ath/carl9170/phy.h
new file mode 100644
index 000000000000..02c34eb4ebde
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/phy.h
@@ -0,0 +1,564 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * PHY register map
5 *
6 * Copyright (c) 2008-2009 Atheros Communications Inc.
7 *
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#ifndef __CARL9170_SHARED_PHY_H
22#define __CARL9170_SHARED_PHY_H
23
24#define AR9170_PHY_REG_BASE (0x1bc000 + 0x9800)
25#define AR9170_PHY_REG(_n) (AR9170_PHY_REG_BASE + \
26 ((_n) << 2))
27
28#define AR9170_PHY_REG_TEST (AR9170_PHY_REG_BASE + 0x0000)
29#define AR9170_PHY_TEST_AGC_CLR 0x10000000
30#define AR9170_PHY_TEST_RFSILENT_BB 0x00002000
31
32#define AR9170_PHY_REG_TURBO (AR9170_PHY_REG_BASE + 0x0004)
33#define AR9170_PHY_TURBO_FC_TURBO_MODE 0x00000001
34#define AR9170_PHY_TURBO_FC_TURBO_SHORT 0x00000002
35#define AR9170_PHY_TURBO_FC_DYN2040_EN 0x00000004
36#define AR9170_PHY_TURBO_FC_DYN2040_PRI_ONLY 0x00000008
37#define AR9170_PHY_TURBO_FC_DYN2040_PRI_CH 0x00000010
38/* For 25 MHz channel spacing -- not used but supported by hw */
39#define AR9170_PHY_TURBO_FC_DYN2040_EXT_CH 0x00000020
40#define AR9170_PHY_TURBO_FC_HT_EN 0x00000040
41#define AR9170_PHY_TURBO_FC_SHORT_GI_40 0x00000080
42#define AR9170_PHY_TURBO_FC_WALSH 0x00000100
43#define AR9170_PHY_TURBO_FC_SINGLE_HT_LTF1 0x00000200
44#define AR9170_PHY_TURBO_FC_ENABLE_DAC_FIFO 0x00000800
45
46#define AR9170_PHY_REG_TEST2 (AR9170_PHY_REG_BASE + 0x0008)
47
48#define AR9170_PHY_REG_TIMING2 (AR9170_PHY_REG_BASE + 0x0010)
49#define AR9170_PHY_TIMING2_USE_FORCE 0x00001000
50#define AR9170_PHY_TIMING2_FORCE 0x00000fff
51#define AR9170_PHY_TIMING2_FORCE_S 0
52
53#define AR9170_PHY_REG_TIMING3 (AR9170_PHY_REG_BASE + 0x0014)
54#define AR9170_PHY_TIMING3_DSC_EXP 0x0001e000
55#define AR9170_PHY_TIMING3_DSC_EXP_S 13
56#define AR9170_PHY_TIMING3_DSC_MAN 0xfffe0000
57#define AR9170_PHY_TIMING3_DSC_MAN_S 17
58
59#define AR9170_PHY_REG_CHIP_ID (AR9170_PHY_REG_BASE + 0x0018)
60#define AR9170_PHY_CHIP_ID_REV_0 0x80
61#define AR9170_PHY_CHIP_ID_REV_1 0x81
62#define AR9170_PHY_CHIP_ID_9160_REV_0 0xb0
63
64#define AR9170_PHY_REG_ACTIVE (AR9170_PHY_REG_BASE + 0x001c)
65#define AR9170_PHY_ACTIVE_EN 0x00000001
66#define AR9170_PHY_ACTIVE_DIS 0x00000000
67
68#define AR9170_PHY_REG_RF_CTL2 (AR9170_PHY_REG_BASE + 0x0024)
69#define AR9170_PHY_RF_CTL2_TX_END_DATA_START 0x000000ff
70#define AR9170_PHY_RF_CTL2_TX_END_DATA_START_S 0
71#define AR9170_PHY_RF_CTL2_TX_END_PA_ON 0x0000ff00
72#define AR9170_PHY_RF_CTL2_TX_END_PA_ON_S 8
73
74#define AR9170_PHY_REG_RF_CTL3 (AR9170_PHY_REG_BASE + 0x0028)
75#define AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON 0x00ff0000
76#define AR9170_PHY_RF_CTL3_TX_END_TO_A2_RX_ON_S 16
77
78#define AR9170_PHY_REG_ADC_CTL (AR9170_PHY_REG_BASE + 0x002c)
79#define AR9170_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
80#define AR9170_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
81#define AR9170_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
82#define AR9170_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
83#define AR9170_PHY_ADC_CTL_OFF_PWDADC 0x00008000
84#define AR9170_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
85#define AR9170_PHY_ADC_CTL_ON_INBUFGAIN_S 16
86
87#define AR9170_PHY_REG_ADC_SERIAL_CTL (AR9170_PHY_REG_BASE + 0x0030)
88#define AR9170_PHY_ADC_SCTL_SEL_INTERNAL_ADDAC 0x00000000
89#define AR9170_PHY_ADC_SCTL_SEL_EXTERNAL_RADIO 0x00000001
90
91#define AR9170_PHY_REG_RF_CTL4 (AR9170_PHY_REG_BASE + 0x0034)
92#define AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF 0xff000000
93#define AR9170_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
94#define AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00ff0000
95#define AR9170_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
96#define AR9170_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000ff00
97#define AR9170_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
98#define AR9170_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000ff
99#define AR9170_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
100
101#define AR9170_PHY_REG_TSTDAC_CONST (AR9170_PHY_REG_BASE + 0x003c)
102
103#define AR9170_PHY_REG_SETTLING (AR9170_PHY_REG_BASE + 0x0044)
104#define AR9170_PHY_SETTLING_SWITCH 0x00003f80
105#define AR9170_PHY_SETTLING_SWITCH_S 7
106
107#define AR9170_PHY_REG_RXGAIN (AR9170_PHY_REG_BASE + 0x0048)
108#define AR9170_PHY_REG_RXGAIN_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2048)
109#define AR9170_PHY_RXGAIN_TXRX_ATTEN 0x0003f000
110#define AR9170_PHY_RXGAIN_TXRX_ATTEN_S 12
111#define AR9170_PHY_RXGAIN_TXRX_RF_MAX 0x007c0000
112#define AR9170_PHY_RXGAIN_TXRX_RF_MAX_S 18
113
114#define AR9170_PHY_REG_DESIRED_SZ (AR9170_PHY_REG_BASE + 0x0050)
115#define AR9170_PHY_DESIRED_SZ_ADC 0x000000ff
116#define AR9170_PHY_DESIRED_SZ_ADC_S 0
117#define AR9170_PHY_DESIRED_SZ_PGA 0x0000ff00
118#define AR9170_PHY_DESIRED_SZ_PGA_S 8
119#define AR9170_PHY_DESIRED_SZ_TOT_DES 0x0ff00000
120#define AR9170_PHY_DESIRED_SZ_TOT_DES_S 20
121
122#define AR9170_PHY_REG_FIND_SIG (AR9170_PHY_REG_BASE + 0x0058)
123#define AR9170_PHY_FIND_SIG_FIRSTEP 0x0003f000
124#define AR9170_PHY_FIND_SIG_FIRSTEP_S 12
125#define AR9170_PHY_FIND_SIG_FIRPWR 0x03fc0000
126#define AR9170_PHY_FIND_SIG_FIRPWR_S 18
127
128#define AR9170_PHY_REG_AGC_CTL1 (AR9170_PHY_REG_BASE + 0x005c)
129#define AR9170_PHY_AGC_CTL1_COARSE_LOW 0x00007f80
130#define AR9170_PHY_AGC_CTL1_COARSE_LOW_S 7
131#define AR9170_PHY_AGC_CTL1_COARSE_HIGH 0x003f8000
132#define AR9170_PHY_AGC_CTL1_COARSE_HIGH_S 15
133
134#define AR9170_PHY_REG_AGC_CONTROL (AR9170_PHY_REG_BASE + 0x0060)
135#define AR9170_PHY_AGC_CONTROL_CAL 0x00000001
136#define AR9170_PHY_AGC_CONTROL_NF 0x00000002
137#define AR9170_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
138#define AR9170_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
139#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
140
141#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064)
142#define AR9170_PHY_CCA_MINCCA_PWR 0x0ff80000
143#define AR9170_PHY_CCA_MINCCA_PWR_S 19
144#define AR9170_PHY_CCA_THRESH62 0x0007f000
145#define AR9170_PHY_CCA_THRESH62_S 12
146
147#define AR9170_PHY_REG_SFCORR (AR9170_PHY_REG_BASE + 0x0068)
148#define AR9170_PHY_SFCORR_M2COUNT_THR 0x0000001f
149#define AR9170_PHY_SFCORR_M2COUNT_THR_S 0
150#define AR9170_PHY_SFCORR_M1_THRESH 0x00fe0000
151#define AR9170_PHY_SFCORR_M1_THRESH_S 17
152#define AR9170_PHY_SFCORR_M2_THRESH 0x7f000000
153#define AR9170_PHY_SFCORR_M2_THRESH_S 24
154
155#define AR9170_PHY_REG_SFCORR_LOW (AR9170_PHY_REG_BASE + 0x006c)
156#define AR9170_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
157#define AR9170_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003f00
158#define AR9170_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
159#define AR9170_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001fc000
160#define AR9170_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
161#define AR9170_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0fe00000
162#define AR9170_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
163
164#define AR9170_PHY_REG_SLEEP_CTR_CONTROL (AR9170_PHY_REG_BASE + 0x0070)
165#define AR9170_PHY_REG_SLEEP_CTR_LIMIT (AR9170_PHY_REG_BASE + 0x0074)
166#define AR9170_PHY_REG_SLEEP_SCAL (AR9170_PHY_REG_BASE + 0x0078)
167
168#define AR9170_PHY_REG_PLL_CTL (AR9170_PHY_REG_BASE + 0x007c)
169#define AR9170_PHY_PLL_CTL_40 0xaa
170#define AR9170_PHY_PLL_CTL_40_5413 0x04
171#define AR9170_PHY_PLL_CTL_44 0xab
172#define AR9170_PHY_PLL_CTL_44_2133 0xeb
173#define AR9170_PHY_PLL_CTL_40_2133 0xea
174
175#define AR9170_PHY_REG_BIN_MASK_1 (AR9170_PHY_REG_BASE + 0x0100)
176#define AR9170_PHY_REG_BIN_MASK_2 (AR9170_PHY_REG_BASE + 0x0104)
177#define AR9170_PHY_REG_BIN_MASK_3 (AR9170_PHY_REG_BASE + 0x0108)
178#define AR9170_PHY_REG_MASK_CTL (AR9170_PHY_REG_BASE + 0x010c)
179
180/* analogue power on time (100ns) */
181#define AR9170_PHY_REG_RX_DELAY (AR9170_PHY_REG_BASE + 0x0114)
182#define AR9170_PHY_REG_SEARCH_START_DELAY (AR9170_PHY_REG_BASE + 0x0118)
183#define AR9170_PHY_RX_DELAY_DELAY 0x00003fff
184
185#define AR9170_PHY_REG_TIMING_CTRL4(_i) (AR9170_PHY_REG_BASE + \
186 (0x0120 + ((_i) << 12)))
187#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01f
188#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
189#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7e0
190#define AR9170_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
191#define AR9170_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
192#define AR9170_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xf000
193#define AR9170_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
194#define AR9170_PHY_TIMING_CTRL4_DO_IQCAL 0x10000
195#define AR9170_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
196#define AR9170_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
197#define AR9170_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
198#define AR9170_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
199
200#define AR9170_PHY_REG_TIMING5 (AR9170_PHY_REG_BASE + 0x0124)
201#define AR9170_PHY_TIMING5_CYCPWR_THR1 0x000000fe
202#define AR9170_PHY_TIMING5_CYCPWR_THR1_S 1
203
204#define AR9170_PHY_REG_POWER_TX_RATE1 (AR9170_PHY_REG_BASE + 0x0134)
205#define AR9170_PHY_REG_POWER_TX_RATE2 (AR9170_PHY_REG_BASE + 0x0138)
206#define AR9170_PHY_REG_POWER_TX_RATE_MAX (AR9170_PHY_REG_BASE + 0x013c)
207#define AR9170_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
208
209#define AR9170_PHY_REG_FRAME_CTL (AR9170_PHY_REG_BASE + 0x0144)
210#define AR9170_PHY_FRAME_CTL_TX_CLIP 0x00000038
211#define AR9170_PHY_FRAME_CTL_TX_CLIP_S 3
212
213#define AR9170_PHY_REG_SPUR_REG (AR9170_PHY_REG_BASE + 0x014c)
214#define AR9170_PHY_SPUR_REG_MASK_RATE_CNTL (0xff << 18)
215#define AR9170_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
216#define AR9170_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
217#define AR9170_PHY_SPUR_REG_MASK_RATE_SELECT (0xff << 9)
218#define AR9170_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
219#define AR9170_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
220#define AR9170_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7f
221#define AR9170_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
222
223#define AR9170_PHY_REG_RADAR_EXT (AR9170_PHY_REG_BASE + 0x0140)
224#define AR9170_PHY_RADAR_EXT_ENA 0x00004000
225
226#define AR9170_PHY_REG_RADAR_0 (AR9170_PHY_REG_BASE + 0x0154)
227#define AR9170_PHY_RADAR_0_ENA 0x00000001
228#define AR9170_PHY_RADAR_0_FFT_ENA 0x80000000
229/* inband pulse threshold */
230#define AR9170_PHY_RADAR_0_INBAND 0x0000003e
231#define AR9170_PHY_RADAR_0_INBAND_S 1
232/* pulse RSSI threshold */
233#define AR9170_PHY_RADAR_0_PRSSI 0x00000fc0
234#define AR9170_PHY_RADAR_0_PRSSI_S 6
235/* pulse height threshold */
236#define AR9170_PHY_RADAR_0_HEIGHT 0x0003f000
237#define AR9170_PHY_RADAR_0_HEIGHT_S 12
238/* radar RSSI threshold */
239#define AR9170_PHY_RADAR_0_RRSSI 0x00fc0000
240#define AR9170_PHY_RADAR_0_RRSSI_S 18
241/* radar firepower threshold */
242#define AR9170_PHY_RADAR_0_FIRPWR 0x7f000000
243#define AR9170_PHY_RADAR_0_FIRPWR_S 24
244
245#define AR9170_PHY_REG_RADAR_1 (AR9170_PHY_REG_BASE + 0x0158)
246#define AR9170_PHY_RADAR_1_RELPWR_ENA 0x00800000
247#define AR9170_PHY_RADAR_1_USE_FIR128 0x00400000
248#define AR9170_PHY_RADAR_1_RELPWR_THRESH 0x003f0000
249#define AR9170_PHY_RADAR_1_RELPWR_THRESH_S 16
250#define AR9170_PHY_RADAR_1_BLOCK_CHECK 0x00008000
251#define AR9170_PHY_RADAR_1_MAX_RRSSI 0x00004000
252#define AR9170_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
253#define AR9170_PHY_RADAR_1_RELSTEP_THRESH 0x00001f00
254#define AR9170_PHY_RADAR_1_RELSTEP_THRESH_S 8
255#define AR9170_PHY_RADAR_1_MAXLEN 0x000000ff
256#define AR9170_PHY_RADAR_1_MAXLEN_S 0
257
258#define AR9170_PHY_REG_SWITCH_CHAIN_0 (AR9170_PHY_REG_BASE + 0x0160)
259#define AR9170_PHY_REG_SWITCH_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2160)
260
261#define AR9170_PHY_REG_SWITCH_COM (AR9170_PHY_REG_BASE + 0x0164)
262
263#define AR9170_PHY_REG_CCA_THRESHOLD (AR9170_PHY_REG_BASE + 0x0168)
264
265#define AR9170_PHY_REG_SIGMA_DELTA (AR9170_PHY_REG_BASE + 0x016c)
266#define AR9170_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
267#define AR9170_PHY_SIGMA_DELTA_ADC_SEL_S 0
268#define AR9170_PHY_SIGMA_DELTA_FILT2 0x000000f8
269#define AR9170_PHY_SIGMA_DELTA_FILT2_S 3
270#define AR9170_PHY_SIGMA_DELTA_FILT1 0x00001f00
271#define AR9170_PHY_SIGMA_DELTA_FILT1_S 8
272#define AR9170_PHY_SIGMA_DELTA_ADC_CLIP 0x01ffe000
273#define AR9170_PHY_SIGMA_DELTA_ADC_CLIP_S 13
274
275#define AR9170_PHY_REG_RESTART (AR9170_PHY_REG_BASE + 0x0170)
276#define AR9170_PHY_RESTART_DIV_GC 0x001c0000
277#define AR9170_PHY_RESTART_DIV_GC_S 18
278
279#define AR9170_PHY_REG_RFBUS_REQ (AR9170_PHY_REG_BASE + 0x017c)
280#define AR9170_PHY_RFBUS_REQ_EN 0x00000001
281
282#define AR9170_PHY_REG_TIMING7 (AR9170_PHY_REG_BASE + 0x0180)
283#define AR9170_PHY_REG_TIMING8 (AR9170_PHY_REG_BASE + 0x0184)
284#define AR9170_PHY_TIMING8_PILOT_MASK_2 0x000fffff
285#define AR9170_PHY_TIMING8_PILOT_MASK_2_S 0
286
287#define AR9170_PHY_REG_BIN_MASK2_1 (AR9170_PHY_REG_BASE + 0x0188)
288#define AR9170_PHY_REG_BIN_MASK2_2 (AR9170_PHY_REG_BASE + 0x018c)
289#define AR9170_PHY_REG_BIN_MASK2_3 (AR9170_PHY_REG_BASE + 0x0190)
290#define AR9170_PHY_REG_BIN_MASK2_4 (AR9170_PHY_REG_BASE + 0x0194)
291#define AR9170_PHY_BIN_MASK2_4_MASK_4 0x00003fff
292#define AR9170_PHY_BIN_MASK2_4_MASK_4_S 0
293
294#define AR9170_PHY_REG_TIMING9 (AR9170_PHY_REG_BASE + 0x0198)
295#define AR9170_PHY_REG_TIMING10 (AR9170_PHY_REG_BASE + 0x019c)
296#define AR9170_PHY_TIMING10_PILOT_MASK_2 0x000fffff
297#define AR9170_PHY_TIMING10_PILOT_MASK_2_S 0
298
299#define AR9170_PHY_REG_TIMING11 (AR9170_PHY_REG_BASE + 0x01a0)
300#define AR9170_PHY_TIMING11_SPUR_DELTA_PHASE 0x000fffff
301#define AR9170_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
302#define AR9170_PHY_TIMING11_SPUR_FREQ_SD 0x3ff00000
303#define AR9170_PHY_TIMING11_SPUR_FREQ_SD_S 20
304#define AR9170_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
305#define AR9170_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
306
307#define AR9170_PHY_REG_RX_CHAINMASK (AR9170_PHY_REG_BASE + 0x01a4)
308#define AR9170_PHY_REG_NEW_ADC_DC_GAIN_CORR(_i) (AR9170_PHY_REG_BASE + \
309 0x01b4 + ((_i) << 12))
310#define AR9170_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
311#define AR9170_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
312
313#define AR9170_PHY_REG_MULTICHAIN_GAIN_CTL (AR9170_PHY_REG_BASE + 0x01ac)
314#define AR9170_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
315#define AR9170_PHY_9285_ANT_DIV_CTL 0x01000000
316#define AR9170_PHY_9285_ANT_DIV_CTL_S 24
317#define AR9170_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
318#define AR9170_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
319#define AR9170_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
320#define AR9170_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
321#define AR9170_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
322#define AR9170_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
323#define AR9170_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
324#define AR9170_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
325#define AR9170_PHY_9285_ANT_DIV_LNA1 2
326#define AR9170_PHY_9285_ANT_DIV_LNA2 1
327#define AR9170_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
328#define AR9170_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
329#define AR9170_PHY_9285_ANT_DIV_GAINTB_0 0
330#define AR9170_PHY_9285_ANT_DIV_GAINTB_1 1
331
332#define AR9170_PHY_REG_EXT_CCA0 (AR9170_PHY_REG_BASE + 0x01b8)
333#define AR9170_PHY_REG_EXT_CCA0_THRESH62 0x000000ff
334#define AR9170_PHY_REG_EXT_CCA0_THRESH62_S 0
335
336#define AR9170_PHY_REG_EXT_CCA (AR9170_PHY_REG_BASE + 0x01bc)
337#define AR9170_PHY_EXT_CCA_CYCPWR_THR1 0x0000fe00
338#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9
339#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000
340#define AR9170_PHY_EXT_CCA_THRESH62_S 16
341#define AR9170_PHY_EXT_MINCCA_PWR 0xff800000
342#define AR9170_PHY_EXT_MINCCA_PWR_S 23
343
344#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0)
345#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f
346#define AR9170_PHY_SFCORR_EXT_M1_THRESH_S 0
347#define AR9170_PHY_SFCORR_EXT_M2_THRESH 0x00003f80
348#define AR9170_PHY_SFCORR_EXT_M2_THRESH_S 7
349#define AR9170_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001fc000
350#define AR9170_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
351#define AR9170_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0fe00000
352#define AR9170_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
353#define AR9170_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
354
355#define AR9170_PHY_REG_HALFGI (AR9170_PHY_REG_BASE + 0x01d0)
356#define AR9170_PHY_HALFGI_DSC_MAN 0x0007fff0
357#define AR9170_PHY_HALFGI_DSC_MAN_S 4
358#define AR9170_PHY_HALFGI_DSC_EXP 0x0000000f
359#define AR9170_PHY_HALFGI_DSC_EXP_S 0
360
361#define AR9170_PHY_REG_CHANNEL_MASK_01_30 (AR9170_PHY_REG_BASE + 0x01d4)
362#define AR9170_PHY_REG_CHANNEL_MASK_31_60 (AR9170_PHY_REG_BASE + 0x01d8)
363
364#define AR9170_PHY_REG_CHAN_INFO_MEMORY (AR9170_PHY_REG_BASE + 0x01dc)
365#define AR9170_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
366
367#define AR9170_PHY_REG_HEAVY_CLIP_ENABLE (AR9170_PHY_REG_BASE + 0x01e0)
368#define AR9170_PHY_REG_HEAVY_CLIP_FACTOR_RIFS (AR9170_PHY_REG_BASE + 0x01ec)
369#define AR9170_PHY_RIFS_INIT_DELAY 0x03ff0000
370
371#define AR9170_PHY_REG_CALMODE (AR9170_PHY_REG_BASE + 0x01f0)
372#define AR9170_PHY_CALMODE_IQ 0x00000000
373#define AR9170_PHY_CALMODE_ADC_GAIN 0x00000001
374#define AR9170_PHY_CALMODE_ADC_DC_PER 0x00000002
375#define AR9170_PHY_CALMODE_ADC_DC_INIT 0x00000003
376
377#define AR9170_PHY_REG_REFCLKDLY (AR9170_PHY_REG_BASE + 0x01f4)
378#define AR9170_PHY_REG_REFCLKPD (AR9170_PHY_REG_BASE + 0x01f8)
379
380
/* per-chain IQ/ADC calibration measurement registers (chain _i) */
#define AR9170_PHY_REG_CAL_MEAS_0(_i)	(AR9170_PHY_REG_BASE + \
					 0x0410 + ((_i) << 12))
#define AR9170_PHY_REG_CAL_MEAS_1(_i)	(AR9170_PHY_REG_BASE + \
					 0x0414 + ((_i) << 12))
#define AR9170_PHY_REG_CAL_MEAS_2(_i)	(AR9170_PHY_REG_BASE + \
					 0x0418 + ((_i) << 12))
#define AR9170_PHY_REG_CAL_MEAS_3(_i)	(AR9170_PHY_REG_BASE + \
					 0x041c + ((_i) << 12))
389
390#define AR9170_PHY_REG_CURRENT_RSSI (AR9170_PHY_REG_BASE + 0x041c)
391
392#define AR9170_PHY_REG_RFBUS_GRANT (AR9170_PHY_REG_BASE + 0x0420)
393#define AR9170_PHY_RFBUS_GRANT_EN 0x00000001
394
395#define AR9170_PHY_REG_CHAN_INFO_GAIN_DIFF (AR9170_PHY_REG_BASE + 0x04f4)
396#define AR9170_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
397
398#define AR9170_PHY_REG_CHAN_INFO_GAIN (AR9170_PHY_REG_BASE + 0x04fc)
399
400#define AR9170_PHY_REG_MODE (AR9170_PHY_REG_BASE + 0x0a00)
401#define AR9170_PHY_MODE_ASYNCFIFO 0x80
402#define AR9170_PHY_MODE_AR2133 0x08
403#define AR9170_PHY_MODE_AR5111 0x00
404#define AR9170_PHY_MODE_AR5112 0x08
405#define AR9170_PHY_MODE_DYNAMIC 0x04
406#define AR9170_PHY_MODE_RF2GHZ 0x02
407#define AR9170_PHY_MODE_RF5GHZ 0x00
408#define AR9170_PHY_MODE_CCK 0x01
409#define AR9170_PHY_MODE_OFDM 0x00
410#define AR9170_PHY_MODE_DYN_CCK_DISABLE 0x100
411
412#define AR9170_PHY_REG_CCK_TX_CTRL (AR9170_PHY_REG_BASE + 0x0a04)
413#define AR9170_PHY_CCK_TX_CTRL_JAPAN 0x00000010
414#define AR9170_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000c
415#define AR9170_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
416
417#define AR9170_PHY_REG_CCK_DETECT (AR9170_PHY_REG_BASE + 0x0a08)
418#define AR9170_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003f
419#define AR9170_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
420/* [12:6] settling time for antenna switch */
421#define AR9170_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001fc0
422#define AR9170_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
423#define AR9170_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
424#define AR9170_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
425
426#define AR9170_PHY_REG_GAIN_2GHZ (AR9170_PHY_REG_BASE + 0x0a0c)
427#define AR9170_PHY_REG_GAIN_2GHZ_CHAIN_2 (AR9170_PHY_REG_BASE + 0x2a0c)
428#define AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00fc0000
429#define AR9170_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
430#define AR9170_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003c00
431#define AR9170_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
432#define AR9170_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001f
433#define AR9170_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
434#define AR9170_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003e0000
435#define AR9170_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
436#define AR9170_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001f000
437#define AR9170_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
438#define AR9170_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000fc0
439#define AR9170_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
440#define AR9170_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003f
441#define AR9170_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
442
443#define AR9170_PHY_REG_CCK_RXCTRL4 (AR9170_PHY_REG_BASE + 0x0a1c)
444#define AR9170_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01f80000
445#define AR9170_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
446
447#define AR9170_PHY_REG_DAG_CTRLCCK (AR9170_PHY_REG_BASE + 0x0a28)
448#define AR9170_REG_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
449#define AR9170_REG_DAG_CTRLCCK_RSSI_THR 0x0001fc00
450#define AR9170_REG_DAG_CTRLCCK_RSSI_THR_S 10
451
452#define AR9170_PHY_REG_FORCE_CLKEN_CCK (AR9170_PHY_REG_BASE + 0x0a2c)
453#define AR9170_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
454
455#define AR9170_PHY_REG_POWER_TX_RATE3 (AR9170_PHY_REG_BASE + 0x0a34)
456#define AR9170_PHY_REG_POWER_TX_RATE4 (AR9170_PHY_REG_BASE + 0x0a38)
457
458#define AR9170_PHY_REG_SCRM_SEQ_XR (AR9170_PHY_REG_BASE + 0x0a3c)
459#define AR9170_PHY_REG_HEADER_DETECT_XR (AR9170_PHY_REG_BASE + 0x0a40)
460#define AR9170_PHY_REG_CHIRP_DETECTED_XR (AR9170_PHY_REG_BASE + 0x0a44)
461#define AR9170_PHY_REG_BLUETOOTH (AR9170_PHY_REG_BASE + 0x0a54)
462
463#define AR9170_PHY_REG_TPCRG1 (AR9170_PHY_REG_BASE + 0x0a58)
464#define AR9170_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
465#define AR9170_PHY_TPCRG1_NUM_PD_GAIN_S 14
466#define AR9170_PHY_TPCRG1_PD_GAIN_1 0x00030000
467#define AR9170_PHY_TPCRG1_PD_GAIN_1_S 16
468#define AR9170_PHY_TPCRG1_PD_GAIN_2 0x000c0000
469#define AR9170_PHY_TPCRG1_PD_GAIN_2_S 18
470#define AR9170_PHY_TPCRG1_PD_GAIN_3 0x00300000
471#define AR9170_PHY_TPCRG1_PD_GAIN_3_S 20
472#define AR9170_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
473#define AR9170_PHY_TPCRG1_PD_CAL_ENABLE_S 22
474
475#define AR9170_PHY_REG_TX_PWRCTRL4 (AR9170_PHY_REG_BASE + 0x0a64)
476#define AR9170_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
477#define AR9170_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
478#define AR9170_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001fe
479#define AR9170_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
480
481#define AR9170_PHY_REG_ANALOG_SWAP (AR9170_PHY_REG_BASE + 0x0a68)
482#define AR9170_PHY_ANALOG_SWAP_AB 0x0001
483#define AR9170_PHY_ANALOG_SWAP_ALT_CHAIN 0x00000040
484
485#define AR9170_PHY_REG_TPCRG5 (AR9170_PHY_REG_BASE + 0x0a6c)
486#define AR9170_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000f
487#define AR9170_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
488#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003f0
489#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
490#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000fc00
491#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
492#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003f0000
493#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
494#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0fc00000
495#define AR9170_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
496
497#define AR9170_PHY_REG_TX_PWRCTRL6_0 (AR9170_PHY_REG_BASE + 0x0a70)
498#define AR9170_PHY_REG_TX_PWRCTRL6_1 (AR9170_PHY_REG_BASE + 0x1a70)
499#define AR9170_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
500#define AR9170_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
501
502#define AR9170_PHY_REG_TX_PWRCTRL7 (AR9170_PHY_REG_BASE + 0x0a74)
503#define AR9170_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01f80000
504#define AR9170_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
505
506#define AR9170_PHY_REG_TX_PWRCTRL9 (AR9170_PHY_REG_BASE + 0x0a7c)
507#define AR9170_PHY_TX_DESIRED_SCALE_CCK 0x00007c00
508#define AR9170_PHY_TX_DESIRED_SCALE_CCK_S 10
509#define AR9170_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
510#define AR9170_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
511
512#define AR9170_PHY_REG_TX_GAIN_TBL1 (AR9170_PHY_REG_BASE + 0x0b00)
513#define AR9170_PHY_TX_GAIN 0x0007f000
514#define AR9170_PHY_TX_GAIN_S 12
515
516/* Carrier leak calibration control, do it after AGC calibration */
517#define AR9170_PHY_REG_CL_CAL_CTL (AR9170_PHY_REG_BASE + 0x0b58)
518#define AR9170_PHY_CL_CAL_ENABLE 0x00000002
519#define AR9170_PHY_CL_CAL_PARALLEL_CAL_ENABLE 0x00000001
520
521#define AR9170_PHY_REG_POWER_TX_RATE5 (AR9170_PHY_REG_BASE + 0x0b8c)
522#define AR9170_PHY_REG_POWER_TX_RATE6 (AR9170_PHY_REG_BASE + 0x0b90)
523
524#define AR9170_PHY_REG_CH0_TX_PWRCTRL11 (AR9170_PHY_REG_BASE + 0x0b98)
525#define AR9170_PHY_REG_CH1_TX_PWRCTRL11 (AR9170_PHY_REG_BASE + 0x1b98)
526#define AR9170_PHY_TX_CHX_PWRCTRL_OLPC_TEMP_COMP 0x0000fc00
527#define AR9170_PHY_TX_CHX_PWRCTRL_OLPC_TEMP_COMP_S 10
528
529#define AR9170_PHY_REG_CAL_CHAINMASK (AR9170_PHY_REG_BASE + 0x0b9c)
530#define AR9170_PHY_REG_VIT_MASK2_M_46_61 (AR9170_PHY_REG_BASE + 0x0ba0)
531#define AR9170_PHY_REG_MASK2_M_31_45 (AR9170_PHY_REG_BASE + 0x0ba4)
532#define AR9170_PHY_REG_MASK2_M_16_30 (AR9170_PHY_REG_BASE + 0x0ba8)
533#define AR9170_PHY_REG_MASK2_M_00_15 (AR9170_PHY_REG_BASE + 0x0bac)
534#define AR9170_PHY_REG_PILOT_MASK_01_30 (AR9170_PHY_REG_BASE + 0x0bb0)
535#define AR9170_PHY_REG_PILOT_MASK_31_60 (AR9170_PHY_REG_BASE + 0x0bb4)
536#define AR9170_PHY_REG_MASK2_P_15_01 (AR9170_PHY_REG_BASE + 0x0bb8)
537#define AR9170_PHY_REG_MASK2_P_30_16 (AR9170_PHY_REG_BASE + 0x0bbc)
538#define AR9170_PHY_REG_MASK2_P_45_31 (AR9170_PHY_REG_BASE + 0x0bc0)
539#define AR9170_PHY_REG_MASK2_P_61_45 (AR9170_PHY_REG_BASE + 0x0bc4)
540#define AR9170_PHY_REG_POWER_TX_SUB (AR9170_PHY_REG_BASE + 0x0bc8)
541#define AR9170_PHY_REG_POWER_TX_RATE7 (AR9170_PHY_REG_BASE + 0x0bcc)
542#define AR9170_PHY_REG_POWER_TX_RATE8 (AR9170_PHY_REG_BASE + 0x0bd0)
543#define AR9170_PHY_REG_POWER_TX_RATE9 (AR9170_PHY_REG_BASE + 0x0bd4)
544#define AR9170_PHY_REG_XPA_CFG (AR9170_PHY_REG_BASE + 0x0bd8)
545#define AR9170_PHY_FORCE_XPA_CFG 0x000000001
546#define AR9170_PHY_FORCE_XPA_CFG_S 0
547
548#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064)
549#define AR9170_PHY_CH1_MINCCA_PWR 0x0ff80000
550#define AR9170_PHY_CH1_MINCCA_PWR_S 19
551
552#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064)
553#define AR9170_PHY_CH2_MINCCA_PWR 0x0ff80000
554#define AR9170_PHY_CH2_MINCCA_PWR_S 19
555
556#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc)
557#define AR9170_PHY_CH1_EXT_MINCCA_PWR 0xff800000
558#define AR9170_PHY_CH1_EXT_MINCCA_PWR_S 23
559
560#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc)
561#define AR9170_PHY_CH2_EXT_MINCCA_PWR 0xff800000
562#define AR9170_PHY_CH2_EXT_MINCCA_PWR_S 23
563
564#endif /* __CARL9170_SHARED_PHY_H */
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
new file mode 100644
index 000000000000..939a0e96ed1f
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -0,0 +1,938 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * 802.11 & command trap routines
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <linux/crc32.h>
45#include <net/mac80211.h>
46#include "carl9170.h"
47#include "hw.h"
48#include "cmd.h"
49
50static void carl9170_dbg_message(struct ar9170 *ar, const char *buf, u32 len)
51{
52 bool restart = false;
53 enum carl9170_restart_reasons reason = CARL9170_RR_NO_REASON;
54
55 if (len > 3) {
56 if (memcmp(buf, CARL9170_ERR_MAGIC, 3) == 0) {
57 ar->fw.err_counter++;
58 if (ar->fw.err_counter > 3) {
59 restart = true;
60 reason = CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS;
61 }
62 }
63
64 if (memcmp(buf, CARL9170_BUG_MAGIC, 3) == 0) {
65 ar->fw.bug_counter++;
66 restart = true;
67 reason = CARL9170_RR_FATAL_FIRMWARE_ERROR;
68 }
69 }
70
71 wiphy_info(ar->hw->wiphy, "FW: %.*s\n", len, buf);
72
73 if (restart)
74 carl9170_restart(ar, reason);
75}
76
77static void carl9170_handle_ps(struct ar9170 *ar, struct carl9170_rsp *rsp)
78{
79 u32 ps;
80 bool new_ps;
81
82 ps = le32_to_cpu(rsp->psm.state);
83
84 new_ps = (ps & CARL9170_PSM_COUNTER) != CARL9170_PSM_WAKE;
85 if (ar->ps.state != new_ps) {
86 if (!new_ps) {
87 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
88 ar->ps.last_action);
89 }
90
91 ar->ps.last_action = jiffies;
92
93 ar->ps.state = new_ps;
94 }
95}
96
97static int carl9170_check_sequence(struct ar9170 *ar, unsigned int seq)
98{
99 if (ar->cmd_seq < -1)
100 return 0;
101
102 /*
103 * Initialize Counter
104 */
105 if (ar->cmd_seq < 0)
106 ar->cmd_seq = seq;
107
108 /*
109 * The sequence is strictly monotonic increasing and it never skips!
110 *
111 * Therefore we can safely assume that whenever we received an
112 * unexpected sequence we have lost some valuable data.
113 */
114 if (seq != ar->cmd_seq) {
115 int count;
116
117 count = (seq - ar->cmd_seq) % ar->fw.cmd_bufs;
118
119 wiphy_err(ar->hw->wiphy, "lost %d command responses/traps! "
120 "w:%d g:%d\n", count, ar->cmd_seq, seq);
121
122 carl9170_restart(ar, CARL9170_RR_LOST_RSP);
123 return -EIO;
124 }
125
126 ar->cmd_seq = (ar->cmd_seq + 1) % ar->fw.cmd_bufs;
127 return 0;
128}
129
/*
 * Deliver a synchronous command response: copy the payload (minus the
 * 4 byte response header) into ar->readbuf for the waiter and signal
 * ar->cmd_wait. A length mismatch triggers a device restart.
 */
static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
{
	/*
	 * Some commands may have a variable response length
	 * and we cannot predict the correct length in advance.
	 * So we only check if we provided enough space for the data.
	 */
	if (unlikely(ar->readlen != (len - 4))) {
		dev_warn(&ar->udev->dev, "received invalid command response:"
			"got %d, instead of %d\n", len - 4, ar->readlen);
		print_hex_dump_bytes("carl9170 cmd:", DUMP_PREFIX_OFFSET,
			ar->cmd_buf, (ar->cmd.hdr.len + 4) & 0x3f);
		print_hex_dump_bytes("carl9170 rsp:", DUMP_PREFIX_OFFSET,
			buffer, len);
		/*
		 * Do not complete. The command times out,
		 * and we get a stack trace from there.
		 */
		carl9170_restart(ar, CARL9170_RR_INVALID_RSP);
	}

	/* hand the payload to the synchronous waiter, if any */
	spin_lock(&ar->cmd_lock);
	if (ar->readbuf) {
		/* skip the 4 byte response header */
		if (len >= 4)
			memcpy(ar->readbuf, buffer + 4, len - 4);

		ar->readbuf = NULL;
	}
	complete(&ar->cmd_wait);
	spin_unlock(&ar->cmd_lock);
}
161
/*
 * Dispatch one firmware response or trap. Synchronous command answers
 * are routed to carl9170_cmd_callback(); asynchronous hardware events
 * (pre-TBTT, tx-status, watchdog, debug text, ...) are handled in the
 * switch below.
 */
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
	struct carl9170_rsp *cmd = (void *) buf;
	struct ieee80211_vif *vif;

	/* a sequence gap forces a restart; drop everything in that case */
	if (carl9170_check_sequence(ar, cmd->hdr.seq))
		return;

	if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
		/* not a trap: the answer to a pending synchronous command */
		if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
			carl9170_cmd_callback(ar, len, buf);

		return;
	}

	/* the header's length field must match the received payload */
	if (unlikely(cmd->hdr.len != (len - 4))) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "FW: received over-/under"
				"sized event %x (%d, but should be %d).\n",
			       cmd->hdr.cmd, cmd->hdr.len, len - 4);

			print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE,
					     buf, len);
		}

		return;
	}

	/* hardware event handlers */
	switch (cmd->hdr.cmd) {
	case CARL9170_RSP_PRETBTT:
		/* pre-TBTT event */
		rcu_read_lock();
		vif = carl9170_get_main_vif(ar);

		if (!vif) {
			rcu_read_unlock();
			break;
		}

		switch (vif->type) {
		case NL80211_IFTYPE_STATION:
			carl9170_handle_ps(ar, cmd);
			break;

		case NL80211_IFTYPE_AP:
		case NL80211_IFTYPE_ADHOC:
			carl9170_update_beacon(ar, true);
			break;

		default:
			break;
		}
		rcu_read_unlock();

		break;


	case CARL9170_RSP_TXCOMP:
		/* TX status notification */
		carl9170_tx_process_status(ar, cmd);
		break;

	case CARL9170_RSP_BEACON_CONFIG:
		/*
		 * (IBSS) beacon send notification
		 * bytes: 04 c2 XX YY B4 B3 B2 B1
		 *
		 * XX always 80
		 * YY always 00
		 * B1-B4 "should" be the number of send out beacons.
		 */
		break;

	case CARL9170_RSP_ATIM:
		/* End of Atim Window */
		break;

	case CARL9170_RSP_WATCHDOG:
		/* Watchdog Interrupt */
		carl9170_restart(ar, CARL9170_RR_WATCHDOG);
		break;

	case CARL9170_RSP_TEXT:
		/* firmware debug */
		carl9170_dbg_message(ar, (char *)buf + 4, len - 4);
		break;

	case CARL9170_RSP_HEXDUMP:
		wiphy_dbg(ar->hw->wiphy, "FW: HD %d\n", len - 4);
		print_hex_dump_bytes("FW:", DUMP_PREFIX_NONE,
				     (char *)buf + 4, len - 4);
		break;

	case CARL9170_RSP_RADAR:
		if (!net_ratelimit())
			break;

		wiphy_info(ar->hw->wiphy, "FW: RADAR! Please report this "
		       "incident to linux-wireless@vger.kernel.org !\n");
		break;

	case CARL9170_RSP_GPIO:
#ifdef CONFIG_CARL9170_WPC
		/* forward WPS button state changes to the input layer */
		if (ar->wps.pbc) {
			bool state = !!(cmd->gpio.gpio & cpu_to_le32(
				AR9170_GPIO_PORT_WPS_BUTTON_PRESSED));

			if (state != ar->wps.pbc_state) {
				ar->wps.pbc_state = state;
				input_report_key(ar->wps.pbc, KEY_WPS_BUTTON,
						 state);
				input_sync(ar->wps.pbc);
			}
		}
#endif /* CONFIG_CARL9170_WPC */
		break;

	case CARL9170_RSP_BOOT:
		complete(&ar->fw_boot_wait);
		break;

	default:
		wiphy_err(ar->hw->wiphy, "FW: received unhandled event %x\n",
			  cmd->hdr.cmd);
		print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
		break;
	}
}
291
292static int carl9170_rx_mac_status(struct ar9170 *ar,
293 struct ar9170_rx_head *head, struct ar9170_rx_macstatus *mac,
294 struct ieee80211_rx_status *status)
295{
296 struct ieee80211_channel *chan;
297 u8 error, decrypt;
298
299 BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
300 BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
301
302 error = mac->error;
303
304 if (error & AR9170_RX_ERROR_WRONG_RA) {
305 if (!ar->sniffer_enabled)
306 return -EINVAL;
307 }
308
309 if (error & AR9170_RX_ERROR_PLCP) {
310 if (!(ar->filter_state & FIF_PLCPFAIL))
311 return -EINVAL;
312
313 status->flag |= RX_FLAG_FAILED_PLCP_CRC;
314 }
315
316 if (error & AR9170_RX_ERROR_FCS) {
317 ar->tx_fcs_errors++;
318
319 if (!(ar->filter_state & FIF_FCSFAIL))
320 return -EINVAL;
321
322 status->flag |= RX_FLAG_FAILED_FCS_CRC;
323 }
324
325 decrypt = ar9170_get_decrypt_type(mac);
326 if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
327 decrypt != AR9170_ENC_ALG_NONE) {
328 if ((decrypt == AR9170_ENC_ALG_TKIP) &&
329 (error & AR9170_RX_ERROR_MMIC))
330 status->flag |= RX_FLAG_MMIC_ERROR;
331
332 status->flag |= RX_FLAG_DECRYPTED;
333 }
334
335 if (error & AR9170_RX_ERROR_DECRYPT && !ar->sniffer_enabled)
336 return -ENODATA;
337
338 error &= ~(AR9170_RX_ERROR_MMIC |
339 AR9170_RX_ERROR_FCS |
340 AR9170_RX_ERROR_WRONG_RA |
341 AR9170_RX_ERROR_DECRYPT |
342 AR9170_RX_ERROR_PLCP);
343
344 /* drop any other error frames */
345 if (unlikely(error)) {
346 /* TODO: update netdevice's RX dropped/errors statistics */
347
348 if (net_ratelimit())
349 wiphy_dbg(ar->hw->wiphy, "received frame with "
350 "suspicious error code (%#x).\n", error);
351
352 return -EINVAL;
353 }
354
355 chan = ar->channel;
356 if (chan) {
357 status->band = chan->band;
358 status->freq = chan->center_freq;
359 }
360
361 switch (mac->status & AR9170_RX_STATUS_MODULATION) {
362 case AR9170_RX_STATUS_MODULATION_CCK:
363 if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
364 status->flag |= RX_FLAG_SHORTPRE;
365 switch (head->plcp[0]) {
366 case AR9170_RX_PHY_RATE_CCK_1M:
367 status->rate_idx = 0;
368 break;
369 case AR9170_RX_PHY_RATE_CCK_2M:
370 status->rate_idx = 1;
371 break;
372 case AR9170_RX_PHY_RATE_CCK_5M:
373 status->rate_idx = 2;
374 break;
375 case AR9170_RX_PHY_RATE_CCK_11M:
376 status->rate_idx = 3;
377 break;
378 default:
379 if (net_ratelimit()) {
380 wiphy_err(ar->hw->wiphy, "invalid plcp cck "
381 "rate (%x).\n", head->plcp[0]);
382 }
383
384 return -EINVAL;
385 }
386 break;
387
388 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
389 case AR9170_RX_STATUS_MODULATION_OFDM:
390 switch (head->plcp[0] & 0xf) {
391 case AR9170_TXRX_PHY_RATE_OFDM_6M:
392 status->rate_idx = 0;
393 break;
394 case AR9170_TXRX_PHY_RATE_OFDM_9M:
395 status->rate_idx = 1;
396 break;
397 case AR9170_TXRX_PHY_RATE_OFDM_12M:
398 status->rate_idx = 2;
399 break;
400 case AR9170_TXRX_PHY_RATE_OFDM_18M:
401 status->rate_idx = 3;
402 break;
403 case AR9170_TXRX_PHY_RATE_OFDM_24M:
404 status->rate_idx = 4;
405 break;
406 case AR9170_TXRX_PHY_RATE_OFDM_36M:
407 status->rate_idx = 5;
408 break;
409 case AR9170_TXRX_PHY_RATE_OFDM_48M:
410 status->rate_idx = 6;
411 break;
412 case AR9170_TXRX_PHY_RATE_OFDM_54M:
413 status->rate_idx = 7;
414 break;
415 default:
416 if (net_ratelimit()) {
417 wiphy_err(ar->hw->wiphy, "invalid plcp ofdm "
418 "rate (%x).\n", head->plcp[0]);
419 }
420
421 return -EINVAL;
422 }
423 if (status->band == IEEE80211_BAND_2GHZ)
424 status->rate_idx += 4;
425 break;
426
427 case AR9170_RX_STATUS_MODULATION_HT:
428 if (head->plcp[3] & 0x80)
429 status->flag |= RX_FLAG_40MHZ;
430 if (head->plcp[6] & 0x80)
431 status->flag |= RX_FLAG_SHORT_GI;
432
433 status->rate_idx = clamp(0, 75, head->plcp[3] & 0x7f);
434 status->flag |= RX_FLAG_HT;
435 break;
436
437 default:
438 BUG();
439 return -ENOSYS;
440 }
441
442 return 0;
443}
444
/*
 * Fill in antenna and signal-strength information from the phy status
 * trailer of a frame (present on SINGLE and LAST A-MPDU subframes).
 */
static void carl9170_rx_phy_status(struct ar9170 *ar,
	struct ar9170_rx_phystatus *phy, struct ieee80211_rx_status *status)
{
	int i;

	BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);

	/* 0x80 apparently marks "no reading" for this chain - TODO confirm */
	for (i = 0; i < 3; i++)
		if (phy->rssi[i] != 0x80)
			status->antenna |= BIT(i);

	/* post-process RSSI */
	for (i = 0; i < 7; i++)
		if (phy->rssi[i] & 0x80)
			phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;

	/* TODO: we could do something with phy_errors */
	status->signal = ar->noise[0] + phy->rssi_combined;
}
464
465static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len)
466{
467 struct sk_buff *skb;
468 int reserved = 0;
469 struct ieee80211_hdr *hdr = (void *) buf;
470
471 if (ieee80211_is_data_qos(hdr->frame_control)) {
472 u8 *qc = ieee80211_get_qos_ctl(hdr);
473 reserved += NET_IP_ALIGN;
474
475 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
476 reserved += NET_IP_ALIGN;
477 }
478
479 if (ieee80211_has_a4(hdr->frame_control))
480 reserved += NET_IP_ALIGN;
481
482 reserved = 32 + (reserved & NET_IP_ALIGN);
483
484 skb = dev_alloc_skb(len + reserved);
485 if (likely(skb)) {
486 skb_reserve(skb, reserved);
487 memcpy(skb_put(skb, len), buf, len);
488 }
489
490 return skb;
491}
492
493static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
494{
495 struct ieee80211_mgmt *mgmt = (void *)data;
496 u8 *pos, *end;
497
498 pos = (u8 *)mgmt->u.beacon.variable;
499 end = data + len;
500 while (pos < end) {
501 if (pos + 2 + pos[1] > end)
502 return NULL;
503
504 if (pos[0] == ie)
505 return pos;
506
507 pos += 2 + pos[1];
508 }
509 return NULL;
510}
511
512/*
513 * NOTE:
514 *
515 * The firmware is in charge of waking up the device just before
516 * the AP is expected to transmit the next beacon.
517 *
518 * This leaves the driver with the important task of deciding when
519 * to set the PHY back to bed again.
520 */
521static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
522{
523 struct ieee80211_hdr *hdr = (void *) data;
524 struct ieee80211_tim_ie *tim_ie;
525 u8 *tim;
526 u8 tim_len;
527 bool cam;
528
529 if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS)))
530 return;
531
532 /* check if this really is a beacon */
533 if (!ieee80211_is_beacon(hdr->frame_control))
534 return;
535
536 /* min. beacon length + FCS_LEN */
537 if (len <= 40 + FCS_LEN)
538 return;
539
540 /* and only beacons from the associated BSSID, please */
541 if (compare_ether_addr(hdr->addr3, ar->common.curbssid) ||
542 !ar->common.curaid)
543 return;
544
545 ar->ps.last_beacon = jiffies;
546
547 tim = carl9170_find_ie(data, len - FCS_LEN, WLAN_EID_TIM);
548 if (!tim)
549 return;
550
551 if (tim[1] < sizeof(*tim_ie))
552 return;
553
554 tim_len = tim[1];
555 tim_ie = (struct ieee80211_tim_ie *) &tim[2];
556
557 if (!WARN_ON_ONCE(!ar->hw->conf.ps_dtim_period))
558 ar->ps.dtim_counter = (tim_ie->dtim_count - 1) %
559 ar->hw->conf.ps_dtim_period;
560
561 /* Check whenever the PHY can be turned off again. */
562
563 /* 1. What about buffered unicast traffic for our AID? */
564 cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid);
565
566 /* 2. Maybe the AP wants to send multicast/broadcast data? */
567 cam = !!(tim_ie->bitmap_ctrl & 0x01);
568
569 if (!cam) {
570 /* back to low-power land. */
571 ar->ps.off_override &= ~PS_OFF_BCN;
572 carl9170_ps_check(ar);
573 } else {
574 /* force CAM */
575 ar->ps.off_override |= PS_OFF_BCN;
576 }
577}
578
579static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
580{
581 __le16 fc;
582
583 if ((ms & AR9170_RX_STATUS_MPDU) == AR9170_RX_STATUS_MPDU_SINGLE) {
584 /*
585 * This frame is not part of an aMPDU.
586 * Therefore it is not subjected to any
587 * of the following content restrictions.
588 */
589 return true;
590 }
591
592 /*
593 * "802.11n - 7.4a.3 A-MPDU contents" describes in which contexts
594 * certain frame types can be part of an aMPDU.
595 *
596 * In order to keep the processing cost down, I opted for a
597 * stateless filter solely based on the frame control field.
598 */
599
600 fc = ((struct ieee80211_hdr *)buf)->frame_control;
601 if (ieee80211_is_data_qos(fc) && ieee80211_is_data_present(fc))
602 return true;
603
604 if (ieee80211_is_ack(fc) || ieee80211_is_back(fc) ||
605 ieee80211_is_back_req(fc))
606 return true;
607
608 if (ieee80211_is_action(fc))
609 return true;
610
611 return false;
612}
613
614/*
615 * If the frame alignment is right (or the kernel has
616 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
617 * is only a single MPDU in the USB frame, then we could
618 * submit to mac80211 the SKB directly. However, since
619 * there may be multiple packets in one SKB in stream
620 * mode, and we need to observe the proper ordering,
621 * this is non-trivial.
622 */
623
/*
 * Process one MPDU extracted from the RX path. Depending on its
 * position within an A-MPDU (single/first/middle/last), the buffer
 * carries different combinations of PLCP head, payload, mac status
 * and phy status trailers, which are peeled off here before the
 * frame is handed to mac80211.
 */
static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
	struct ar9170_rx_head *head;
	struct ar9170_rx_macstatus *mac;
	struct ar9170_rx_phystatus *phy = NULL;
	struct ieee80211_rx_status status;
	struct sk_buff *skb;
	int mpdu_len;
	u8 mac_status;

	if (!IS_STARTED(ar))
		return;

	if (unlikely(len < sizeof(*mac)))
		goto drop;

	/* the mac status trailer is present on every MPDU */
	mpdu_len = len - sizeof(*mac);

	mac = (void *)(buf + mpdu_len);
	mac_status = mac->status;
	switch (mac_status & AR9170_RX_STATUS_MPDU) {
	case AR9170_RX_STATUS_MPDU_FIRST:
		/* Aggregated MPDUs start with an PLCP header */
		if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
			head = (void *) buf;

			/*
			 * The PLCP header needs to be cached for the
			 * following MIDDLE + LAST A-MPDU packets.
			 *
			 * So, if you are wondering why all frames seem
			 * to share a common RX status information,
			 * then you have the answer right here...
			 */
			memcpy(&ar->rx_plcp, (void *) buf,
			       sizeof(struct ar9170_rx_head));

			mpdu_len -= sizeof(struct ar9170_rx_head);
			buf += sizeof(struct ar9170_rx_head);

			ar->rx_has_plcp = true;
		} else {
			if (net_ratelimit()) {
				wiphy_err(ar->hw->wiphy, "plcp info "
					"is clipped.\n");
			}

			goto drop;
		}
		break;

	case AR9170_RX_STATUS_MPDU_LAST:
		/*
		 * The last frame of an A-MPDU has an extra tail
		 * which does contain the phy status of the whole
		 * aggregate.
		 */

		if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
			mpdu_len -= sizeof(struct ar9170_rx_phystatus);
			phy = (void *)(buf + mpdu_len);
		} else {
			if (net_ratelimit()) {
				wiphy_err(ar->hw->wiphy, "frame tail "
					"is clipped.\n");
			}

			goto drop;
		}
		/* fall through - LAST also uses the cached PLCP header */

	case AR9170_RX_STATUS_MPDU_MIDDLE:
		/* These are just data + mac status */
		if (unlikely(!ar->rx_has_plcp)) {
			/*
			 * NOTE(review): this returns (instead of
			 * "goto drop") when the ratelimit triggers, so
			 * rx_dropped is not incremented in that case -
			 * confirm whether that is intentional.
			 */
			if (!net_ratelimit())
				return;

			wiphy_err(ar->hw->wiphy, "rx stream does not start "
				"with a first_mpdu frame tag.\n");

			goto drop;
		}

		head = &ar->rx_plcp;
		break;

	case AR9170_RX_STATUS_MPDU_SINGLE:
		/* single mpdu has both: plcp (head) and phy status (tail) */
		head = (void *) buf;

		mpdu_len -= sizeof(struct ar9170_rx_head);
		mpdu_len -= sizeof(struct ar9170_rx_phystatus);

		buf += sizeof(struct ar9170_rx_head);
		phy = (void *)(buf + mpdu_len);
		break;

	default:
		/* the two-bit MPDU field is fully decoded above */
		BUG_ON(1);
		break;
	}

	/* FC + DU + RA + FCS */
	if (unlikely(mpdu_len < (2 + 2 + ETH_ALEN + FCS_LEN)))
		goto drop;

	memset(&status, 0, sizeof(status));
	if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status)))
		goto drop;

	if (!carl9170_ampdu_check(ar, buf, mac_status))
		goto drop;

	if (phy)
		carl9170_rx_phy_status(ar, phy, &status);

	carl9170_ps_beacon(ar, buf, mpdu_len);

	skb = carl9170_rx_copy_data(buf, mpdu_len);
	if (!skb)
		goto drop;

	memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
	ieee80211_rx(ar->hw, skb);
	return;

drop:
	ar->rx_dropped++;
}
752
753static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
754 const unsigned int resplen)
755{
756 struct carl9170_rsp *cmd;
757 int i = 0;
758
759 while (i < resplen) {
760 cmd = (void *) &respbuf[i];
761
762 i += cmd->hdr.len + 4;
763 if (unlikely(i > resplen))
764 break;
765
766 carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
767 }
768
769 if (unlikely(i != resplen)) {
770 if (!net_ratelimit())
771 return;
772
773 wiphy_err(ar->hw->wiphy, "malformed firmware trap:\n");
774 print_hex_dump_bytes("rxcmd:", DUMP_PREFIX_OFFSET,
775 respbuf, resplen);
776 }
777}
778
779static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
780{
781 unsigned int i = 0;
782
783 /* weird thing, but this is the same in the original driver */
784 while (len > 2 && i < 12 && buf[0] == 0xff && buf[1] == 0xff) {
785 i += 2;
786 len -= 2;
787 buf += 2;
788 }
789
790 if (unlikely(len < 4))
791 return;
792
793 /* found the 6 * 0xffff marker? */
794 if (i == 12)
795 carl9170_rx_untie_cmds(ar, buf, len);
796 else
797 carl9170_handle_mpdu(ar, buf, len);
798}
799
/*
 * De-multiplex the "rx stream": one transfer that contains several
 * 4-byte-tagged, 4-byte-aligned sub-frames. A sub-frame clipped at
 * the end of a transfer is stashed in ar->rx_failover and stitched
 * together with the data of the next transfer.
 */
static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
{
	unsigned int tlen, wlen = 0, clen = 0;
	struct ar9170_stream *rx_stream;
	u8 *tbuf;

	tbuf = buf;
	tlen = len;

	while (tlen >= 4) {
		rx_stream = (void *) tbuf;
		clen = le16_to_cpu(rx_stream->length);
		wlen = ALIGN(clen, 4);

		/* check if this is stream has a valid tag.*/
		if (rx_stream->tag != cpu_to_le16(AR9170_RX_STREAM_TAG)) {
			/*
			 * TODO: handle the highly unlikely event that the
			 * corrupted stream has the TAG at the right position.
			 */

			/* check if the frame can be repaired. */
			if (!ar->rx_failover_missing) {

				/* this is not "short read". */
				if (net_ratelimit()) {
					wiphy_err(ar->hw->wiphy,
						"missing tag!\n");
				}

				__carl9170_rx(ar, tbuf, tlen);
				return;
			}

			if (ar->rx_failover_missing > tlen) {
				if (net_ratelimit()) {
					wiphy_err(ar->hw->wiphy,
						"possible multi "
						"stream corruption!\n");
					goto err_telluser;
				} else {
					goto err_silent;
				}
			}

			/* append this fragment to the pending failover skb */
			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
			ar->rx_failover_missing -= tlen;

			if (ar->rx_failover_missing <= 0) {
				/*
				 * nested carl9170_rx_stream call!
				 *
				 * termination is guranteed, even when the
				 * combined frame also have an element with
				 * a bad tag.
				 */

				ar->rx_failover_missing = 0;
				carl9170_rx_stream(ar, ar->rx_failover->data,
						   ar->rx_failover->len);

				skb_reset_tail_pointer(ar->rx_failover);
				skb_trim(ar->rx_failover, 0);
			}

			return;
		}

		/* check if stream is clipped */
		if (wlen > tlen - 4) {
			if (ar->rx_failover_missing) {
				/* TODO: handle double stream corruption. */
				if (net_ratelimit()) {
					wiphy_err(ar->hw->wiphy, "double rx "
						"stream corruption!\n");
					goto err_telluser;
				} else {
					goto err_silent;
				}
			}

			/*
			 * save incomplete data set.
			 * the firmware will resend the missing bits when
			 * the rx - descriptor comes round again.
			 */

			memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
			ar->rx_failover_missing = clen - tlen;
			return;
		}
		__carl9170_rx(ar, rx_stream->payload, clen);

		/* advance past the aligned payload + 4 byte stream header */
		tbuf += wlen + 4;
		tlen -= wlen + 4;
	}

	if (tlen) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed "
				"data left in rx stream!\n", tlen);
		}

		goto err_telluser;
	}

	return;

err_telluser:
	wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, "
		"data:%d, rx:%d, pending:%d ]\n", clen, wlen, tlen,
		ar->rx_failover_missing);

	if (ar->rx_failover_missing)
		print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
				     ar->rx_failover->data,
				     ar->rx_failover->len);

	print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
			     buf, len);

	wiphy_err(ar->hw->wiphy, "please check your hardware and cables, if "
		"you see this message frequently.\n");

	/* intentional fall through: always reset the failover state */
err_silent:
	if (ar->rx_failover_missing) {
		skb_reset_tail_pointer(ar->rx_failover);
		skb_trim(ar->rx_failover, 0);
		ar->rx_failover_missing = 0;
	}
}
931
932void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len)
933{
934 if (ar->fw.rx_stream)
935 carl9170_rx_stream(ar, buf, len);
936 else
937 __carl9170_rx(ar, buf, len);
938}
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
new file mode 100644
index 000000000000..b575c865142d
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -0,0 +1,1335 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * 802.11 xmit & status routines
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/module.h>
43#include <linux/etherdevice.h>
44#include <net/mac80211.h>
45#include "carl9170.h"
46#include "hw.h"
47#include "cmd.h"
48
49static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
50 unsigned int queue)
51{
52 if (unlikely(modparam_noht)) {
53 return queue;
54 } else {
55 /*
56 * This is just another workaround, until
57 * someone figures out how to get QoS and
58 * AMPDU to play nicely together.
59 */
60
61 return 2; /* AC_BE */
62 }
63}
64
/* Convenience wrapper: queue mapping taken straight from an skb. */
static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
	struct sk_buff *skb)
{
	unsigned int mapped = skb_get_queue_mapping(skb);

	return __carl9170_get_queue(ar, mapped);
}
70
71static bool is_mem_full(struct ar9170 *ar)
72{
73 return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
74 atomic_read(&ar->mem_free_blocks));
75}
76
77static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
78{
79 int queue, i;
80 bool mem_full;
81
82 atomic_inc(&ar->tx_total_queued);
83
84 queue = skb_get_queue_mapping(skb);
85 spin_lock_bh(&ar->tx_stats_lock);
86
87 /*
88 * The driver has to accept the frame, regardless if the queue is
89 * full to the brim, or not. We have to do the queuing internally,
90 * since mac80211 assumes that a driver which can operate with
91 * aggregated frames does not reject frames for this reason.
92 */
93 ar->tx_stats[queue].len++;
94 ar->tx_stats[queue].count++;
95
96 mem_full = is_mem_full(ar);
97 for (i = 0; i < ar->hw->queues; i++) {
98 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
99 ieee80211_stop_queue(ar->hw, i);
100 ar->queue_stop_timeout[i] = jiffies;
101 }
102 }
103
104 spin_unlock_bh(&ar->tx_stats_lock);
105}
106
107static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
108{
109 struct ieee80211_tx_info *txinfo;
110 int queue;
111
112 txinfo = IEEE80211_SKB_CB(skb);
113 queue = skb_get_queue_mapping(skb);
114
115 spin_lock_bh(&ar->tx_stats_lock);
116
117 ar->tx_stats[queue].len--;
118
119 if (!is_mem_full(ar)) {
120 unsigned int i;
121 for (i = 0; i < ar->hw->queues; i++) {
122 if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
123 continue;
124
125 if (ieee80211_queue_stopped(ar->hw, i)) {
126 unsigned long tmp;
127
128 tmp = jiffies - ar->queue_stop_timeout[i];
129 if (tmp > ar->max_queue_stop_timeout[i])
130 ar->max_queue_stop_timeout[i] = tmp;
131 }
132
133 ieee80211_wake_queue(ar->hw, i);
134 }
135 }
136
137 spin_unlock_bh(&ar->tx_stats_lock);
138 if (atomic_dec_and_test(&ar->tx_total_queued))
139 complete(&ar->tx_flush);
140}
141
142static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
143{
144 struct _carl9170_tx_superframe *super = (void *) skb->data;
145 unsigned int chunks;
146 int cookie = -1;
147
148 atomic_inc(&ar->mem_allocs);
149
150 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
151 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
152 atomic_add(chunks, &ar->mem_free_blocks);
153 return -ENOSPC;
154 }
155
156 spin_lock_bh(&ar->mem_lock);
157 cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
158 spin_unlock_bh(&ar->mem_lock);
159
160 if (unlikely(cookie < 0)) {
161 atomic_add(chunks, &ar->mem_free_blocks);
162 return -ENOSPC;
163 }
164
165 super = (void *) skb->data;
166
167 /*
168 * Cookie #0 serves two special purposes:
169 * 1. The firmware might use it generate BlockACK frames
170 * in responds of an incoming BlockAckReqs.
171 *
172 * 2. Prevent double-free bugs.
173 */
174 super->s.cookie = (u8) cookie + 1;
175 return 0;
176}
177
/*
 * Return the device memory blocks that carl9170_alloc_dev_space()
 * reserved for @skb and invalidate the frame's cookie.
 */
static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	int cookie;

	/* make a local copy of the cookie */
	cookie = super->s.cookie;
	/* invalidate cookie */
	super->s.cookie = 0;

	/*
	 * Do a out-of-bounds check on the cookie:
	 *
	 *  * cookie "0" is reserved and won't be assigned to any
	 *    out-going frame. Internally however, it is used to
	 *    mark no longer/un-accounted frames and serves as a
	 *    cheap way of preventing frames from being freed
	 *    twice by _accident_. NB: There is a tiny race...
	 *
	 *  * obviously, cookie number is limited by the amount
	 *    of available memory blocks, so the number can
	 *    never execeed the mem_blocks count.
	 */
	if (unlikely(WARN_ON_ONCE(cookie == 0) ||
	    WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
		return;

	atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
		   &ar->mem_free_blocks);

	spin_lock_bh(&ar->mem_lock);
	/* cookie - 1: undo the +1 bias applied at allocation time */
	bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
	spin_unlock_bh(&ar->mem_lock);
}
212
/*
 * Final kref destructor for a tx frame, run once the last reference
 * is dropped. Reports the frame's status back to mac80211, unless
 * the report would be redundant for an acked A-MPDU subframe.
 *
 * Called from any context.
 */
static void carl9170_tx_release(struct kref *ref)
{
	struct ar9170 *ar;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_tx_info *txinfo;
	struct sk_buff *skb;

	/* recover the enclosing skb from the embedded driver data */
	arinfo = container_of(ref, struct carl9170_tx_info, ref);
	txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
			      rate_driver_data);
	skb = container_of((void *) txinfo, struct sk_buff, cb);

	ar = arinfo->ar;
	if (WARN_ON_ONCE(!ar))
		return;

	BUILD_BUG_ON(
	    offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);

	/* wipe the driver-private tail of the status area */
	memset(&txinfo->status.ampdu_ack_len, 0,
	       sizeof(struct ieee80211_tx_info) -
	       offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));

	if (atomic_read(&ar->tx_total_queued))
		ar->tx_schedule = true;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
		if (!atomic_read(&ar->tx_ampdu_upload))
			ar->tx_ampdu_schedule = true;

		if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
			/* restore the aggregation feedback from pad[] */
			txinfo->status.ampdu_len = txinfo->pad[0];
			txinfo->status.ampdu_ack_len = txinfo->pad[1];
			txinfo->pad[0] = txinfo->pad[1] = 0;
		} else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
			/*
			 * drop redundant tx_status reports:
			 *
			 * 1. ampdu_ack_len of the final tx_status does
			 *    include the feedback of this particular frame.
			 *
			 * 2. tx_status_irqsafe only queues up to 128
			 *    tx feedback reports and discards the rest.
			 *
			 * 3. minstrel_ht is picky, it only accepts
			 *    reports of frames with the TX_STATUS_AMPDU flag.
			 */

			dev_kfree_skb_any(skb);
			return;
		} else {
			/*
			 * Frame has failed, but we want to keep it in
			 * case it was lost due to a power-state
			 * transition.
			 */
		}
	}

	/* strip the superframe header before handing the skb back */
	skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}
276
277void carl9170_tx_get_skb(struct sk_buff *skb)
278{
279 struct carl9170_tx_info *arinfo = (void *)
280 (IEEE80211_SKB_CB(skb))->rate_driver_data;
281 kref_get(&arinfo->ref);
282}
283
284int carl9170_tx_put_skb(struct sk_buff *skb)
285{
286 struct carl9170_tx_info *arinfo = (void *)
287 (IEEE80211_SKB_CB(skb))->rate_driver_data;
288
289 return kref_put(&arinfo->ref, carl9170_tx_release);
290}
291
/* Caller must hold the tid_info->lock & rcu_read_lock */
static void carl9170_tx_shift_bm(struct ar9170 *ar,
	struct carl9170_sta_tid *tid_info, u16 seq)
{
	u16 off;

	/* position of this MPDU relative to the BlockAck window start */
	off = SEQ_DIFF(seq, tid_info->bsn);

	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	/*
	 * Sanity check. For each MPDU we set the bit in bitmap and
	 * clear it once we received the tx_status.
	 * But if the bit is already cleared then we've been bitten
	 * by a bug.
	 */
	WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));

	/* how far the window may advance at most (up to the next
	 * sequence number to be transmitted) */
	off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	/* stop at the first MPDU that is still awaiting its tx_status */
	if (!bitmap_empty(tid_info->bitmap, off))
		off = find_first_bit(tid_info->bitmap, off);

	/* advance the window start (12-bit 802.11 sequence space) */
	tid_info->bsn += off;
	tid_info->bsn &= 0x0fff;

	/* realign the bitmap with the new window start */
	bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
			   off, CARL9170_BAW_BITS);
}
324
325static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
326 struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
327{
328 struct _carl9170_tx_superframe *super = (void *) skb->data;
329 struct ieee80211_hdr *hdr = (void *) super->frame_data;
330 struct ieee80211_tx_info *tx_info;
331 struct carl9170_tx_info *ar_info;
332 struct carl9170_sta_info *sta_info;
333 struct ieee80211_sta *sta;
334 struct carl9170_sta_tid *tid_info;
335 struct ieee80211_vif *vif;
336 unsigned int vif_id;
337 u8 tid;
338
339 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
340 txinfo->flags & IEEE80211_TX_CTL_INJECTED)
341 return;
342
343 tx_info = IEEE80211_SKB_CB(skb);
344 ar_info = (void *) tx_info->rate_driver_data;
345
346 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
347 CARL9170_TX_SUPER_MISC_VIF_ID_S;
348
349 if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
350 return;
351
352 rcu_read_lock();
353 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
354 if (unlikely(!vif))
355 goto out_rcu;
356
357 /*
358 * Normally we should use wrappers like ieee80211_get_DA to get
359 * the correct peer ieee80211_sta.
360 *
361 * But there is a problem with indirect traffic (broadcasts, or
362 * data which is designated for other stations) in station mode.
363 * The frame will be directed to the AP for distribution and not
364 * to the actual destination.
365 */
366 sta = ieee80211_find_sta(vif, hdr->addr1);
367 if (unlikely(!sta))
368 goto out_rcu;
369
370 tid = get_tid_h(hdr);
371
372 sta_info = (void *) sta->drv_priv;
373 tid_info = rcu_dereference(sta_info->agg[tid]);
374 if (!tid_info)
375 goto out_rcu;
376
377 spin_lock_bh(&tid_info->lock);
378 if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
379 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
380
381 if (sta_info->stats[tid].clear) {
382 sta_info->stats[tid].clear = false;
383 sta_info->stats[tid].ampdu_len = 0;
384 sta_info->stats[tid].ampdu_ack_len = 0;
385 }
386
387 sta_info->stats[tid].ampdu_len++;
388 if (txinfo->status.rates[0].count == 1)
389 sta_info->stats[tid].ampdu_ack_len++;
390
391 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
392 txinfo->pad[0] = sta_info->stats[tid].ampdu_len;
393 txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len;
394 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
395 sta_info->stats[tid].clear = true;
396 }
397 spin_unlock_bh(&tid_info->lock);
398
399out_rcu:
400 rcu_read_unlock();
401}
402
403void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
404 const bool success)
405{
406 struct ieee80211_tx_info *txinfo;
407
408 carl9170_tx_accounting_free(ar, skb);
409
410 txinfo = IEEE80211_SKB_CB(skb);
411
412 if (success)
413 txinfo->flags |= IEEE80211_TX_STAT_ACK;
414 else
415 ar->tx_ack_failures++;
416
417 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
418 carl9170_tx_status_process_ampdu(ar, skb, txinfo);
419
420 carl9170_tx_put_skb(skb);
421}
422
423/* This function may be called form any context */
424void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
425{
426 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
427
428 atomic_dec(&ar->tx_total_pending);
429
430 if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
431 atomic_dec(&ar->tx_ampdu_upload);
432
433 if (carl9170_tx_put_skb(skb))
434 tasklet_hi_schedule(&ar->usb_tasklet);
435}
436
/*
 * Find and unlink the queued frame matching the firmware cookie.
 * Returns the skb (with its device memory released) or NULL when
 * another thread already claimed it.
 */
static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
	struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		struct _carl9170_tx_superframe *txc = (void *) skb->data;

		if (txc->s.cookie != cookie)
			continue;

		/* unlink while still holding the lock, then drop it —
		 * the walk is over at this point */
		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);

		carl9170_release_dev_space(ar, skb);
		return skb;
	}
	spin_unlock_bh(&queue->lock);

	return NULL;
}
459
460static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
461 unsigned int tries, struct ieee80211_tx_info *txinfo)
462{
463 unsigned int i;
464
465 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
466 if (txinfo->status.rates[i].idx < 0)
467 break;
468
469 if (i == rix) {
470 txinfo->status.rates[i].count = tries;
471 i++;
472 break;
473 }
474 }
475
476 for (; i < IEEE80211_TX_MAX_RATES; i++) {
477 txinfo->status.rates[i].idx = -1;
478 txinfo->status.rates[i].count = 0;
479 }
480}
481
482static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
483{
484 int i;
485 struct sk_buff *skb;
486 struct ieee80211_tx_info *txinfo;
487 struct carl9170_tx_info *arinfo;
488 bool restart = false;
489
490 for (i = 0; i < ar->hw->queues; i++) {
491 spin_lock_bh(&ar->tx_status[i].lock);
492
493 skb = skb_peek(&ar->tx_status[i]);
494
495 if (!skb)
496 goto next;
497
498 txinfo = IEEE80211_SKB_CB(skb);
499 arinfo = (void *) txinfo->rate_driver_data;
500
501 if (time_is_before_jiffies(arinfo->timeout +
502 msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true)
503 restart = true;
504
505next:
506 spin_unlock_bh(&ar->tx_status[i].lock);
507 }
508
509 if (restart) {
510 /*
511 * At least one queue has been stuck for long enough.
512 * Give the device a kick and hope it gets back to
513 * work.
514 *
515 * possible reasons may include:
516 * - frames got lost/corrupted (bad connection to the device)
517 * - stalled rx processing/usb controller hiccups
518 * - firmware errors/bugs
519 * - every bug you can think of.
520 * - all bugs you can't...
521 * - ...
522 */
523 carl9170_restart(ar, CARL9170_RR_STUCK_TX);
524 }
525}
526
527void carl9170_tx_janitor(struct work_struct *work)
528{
529 struct ar9170 *ar = container_of(work, struct ar9170,
530 tx_janitor.work);
531 if (!IS_STARTED(ar))
532 return;
533
534 ar->tx_janitor_last_run = jiffies;
535
536 carl9170_check_queue_stop_timeout(ar);
537
538 if (!atomic_read(&ar->tx_total_queued))
539 return;
540
541 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
542 msecs_to_jiffies(CARL9170_TX_TIMEOUT));
543}
544
545static void __carl9170_tx_process_status(struct ar9170 *ar,
546 const uint8_t cookie, const uint8_t info)
547{
548 struct sk_buff *skb;
549 struct ieee80211_tx_info *txinfo;
550 struct carl9170_tx_info *arinfo;
551 unsigned int r, t, q;
552 bool success = true;
553
554 q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
555
556 skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
557 if (!skb) {
558 /*
559 * We have lost the race to another thread.
560 */
561
562 return ;
563 }
564
565 txinfo = IEEE80211_SKB_CB(skb);
566 arinfo = (void *) txinfo->rate_driver_data;
567
568 if (!(info & CARL9170_TX_STATUS_SUCCESS))
569 success = false;
570
571 r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
572 t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
573
574 carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
575 carl9170_tx_status(ar, skb, success);
576}
577
/*
 * Walk all tx status descriptors attached to a firmware response and
 * process each one.
 */
void carl9170_tx_process_status(struct ar9170 *ar,
	const struct carl9170_rsp *cmd)
{
	unsigned int i;

	for (i = 0; i < cmd->hdr.ext; i++) {
		/* each status entry is two bytes (cookie + info), so
		 * ext must stay within the response payload; dump the
		 * malformed response for post-mortem analysis */
		if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
			print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
					     (void *) cmd, cmd->hdr.len + 4);
			break;
		}

		__carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
					     cmd->_tx_status[i].info);
	}
}
594
/*
 * Build the 32-bit PHY control word for one tx rate: bandwidth, guard
 * interval, modulation, MCS/legacy rate, tx power and chain mask.
 */
static __le32 carl9170_tx_physet(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
	struct ieee80211_rate *rate = NULL;
	u32 power, chains;
	__le32 tmp;

	tmp = cpu_to_le32(0);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
			AR9170_TX_PHY_BW_S);
	/* this works because 40 MHz is 2 and dup is 3 */
	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
			AR9170_TX_PHY_BW_S);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		/* HT (MCS) rate */
		u32 r = txrate->idx;
		u8 *txpower;

		/* heavy clip control */
		tmp |= cpu_to_le32((r & 0x7) <<
			AR9170_TX_PHY_TX_HEAVY_CLIP_S);

		/* pick the calibrated power table for band/bandwidth */
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			if (info->band == IEEE80211_BAND_5GHZ)
				txpower = ar->power_5G_ht40;
			else
				txpower = ar->power_2G_ht40;
		} else {
			if (info->band == IEEE80211_BAND_5GHZ)
				txpower = ar->power_5G_ht20;
			else
				txpower = ar->power_2G_ht20;
		}

		power = txpower[r & 7];

		/* +1 dBm for HT40 (power is in 0.5 dBm steps) */
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			power += 2;

		r <<= AR9170_TX_PHY_MCS_S;
		BUG_ON(r & ~AR9170_TX_PHY_MCS);

		tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
		tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/*
		 * green field preamble does not work.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		 * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
		 */
	} else {
		/* legacy CCK/OFDM rate */
		u8 *txpower;
		u32 mod;
		u32 phyrate;
		u8 idx = txrate->idx;

		/* 5 GHz rate table starts past the four CCK entries */
		if (info->band != IEEE80211_BAND_2GHZ) {
			idx += 4;
			txpower = ar->power_5G_leg;
			mod = AR9170_TX_PHY_MOD_OFDM;
		} else {
			if (idx < 4) {
				txpower = ar->power_2G_cck;
				mod = AR9170_TX_PHY_MOD_CCK;
			} else {
				mod = AR9170_TX_PHY_MOD_OFDM;
				txpower = ar->power_2G_ofdm;
			}
		}

		rate = &__carl9170_ratetable[idx];

		/* hw_value packs the phy rate (low nibble) and the
		 * power-table index (bits 4-5) */
		phyrate = rate->hw_value & 0xF;
		power = txpower[(rate->hw_value & 0x30) >> 4];
		phyrate <<= AR9170_TX_PHY_MCS_S;

		tmp |= cpu_to_le32(mod);
		tmp |= cpu_to_le32(phyrate);

		/*
		 * short preamble seems to be broken too.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		 * tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
		 */
	}
	power <<= AR9170_TX_PHY_TX_PWR_S;
	power &= AR9170_TX_PHY_TX_PWR;
	tmp |= cpu_to_le32(power);

	/* set TX chains */
	if (ar->eeprom.tx_mask == 1) {
		chains = AR9170_TX_PHY_TXCHAIN_1;
	} else {
		chains = AR9170_TX_PHY_TXCHAIN_2;

		/* >= 36M legacy OFDM - use only one chain */
		if (rate && rate->bitrate >= 360 &&
		    !(txrate->flags & IEEE80211_TX_RC_MCS))
			chains = AR9170_TX_PHY_TXCHAIN_1;
	}
	tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);

	return tmp;
}
708
/*
 * Decide whether RTS/CTS protection should be used for this frame.
 * The switch intentionally cascades: each ERP mode includes the
 * conditions of the modes below it.
 */
static bool carl9170_tx_rts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate,
				  bool ampdu, bool multi)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
		if (ampdu)
			break;

		/* fall through */
	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
			break;

		/* fall through */
	case CARL9170_ERP_RTS:
		/* no RTS for multicast/no-ack frames */
		if (likely(!multi))
			return true;

		/* fall through */
	default:
		break;
	}

	return false;
}
732
/*
 * Decide whether CTS-to-self protection should be used for this frame.
 * As in carl9170_tx_rts_check, the switch cascades intentionally.
 */
static bool carl9170_tx_cts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
			break;

		/* fall through */
	case CARL9170_ERP_CTS:
		return true;

	default:
		break;
	}

	return false;
}
751
752static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
753{
754 struct ieee80211_hdr *hdr;
755 struct _carl9170_tx_superframe *txc;
756 struct carl9170_vif_info *cvif;
757 struct ieee80211_tx_info *info;
758 struct ieee80211_tx_rate *txrate;
759 struct ieee80211_sta *sta;
760 struct carl9170_tx_info *arinfo;
761 unsigned int hw_queue;
762 int i;
763 __le16 mac_tmp;
764 u16 len;
765 bool ampdu, no_ack;
766
767 BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
768 BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
769 CARL9170_TX_SUPERDESC_LEN);
770
771 BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
772 AR9170_TX_HWDESC_LEN);
773
774 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
775
776 BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
777 ((CARL9170_TX_SUPER_MISC_VIF_ID >>
778 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
779
780 hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
781
782 hdr = (void *)skb->data;
783 info = IEEE80211_SKB_CB(skb);
784 len = skb->len;
785
786 /*
787 * Note: If the frame was sent through a monitor interface,
788 * the ieee80211_vif pointer can be NULL.
789 */
790 if (likely(info->control.vif))
791 cvif = (void *) info->control.vif->drv_priv;
792 else
793 cvif = NULL;
794
795 sta = info->control.sta;
796
797 txc = (void *)skb_push(skb, sizeof(*txc));
798 memset(txc, 0, sizeof(*txc));
799
800 SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
801
802 if (likely(cvif))
803 SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);
804
805 if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
806 txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
807
808 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
809 txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
810
811 mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
812 AR9170_TX_MAC_BACKOFF);
813 mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &&
814 AR9170_TX_MAC_QOS);
815
816 no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
817 if (unlikely(no_ack))
818 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
819
820 if (info->control.hw_key) {
821 len += info->control.hw_key->icv_len;
822
823 switch (info->control.hw_key->cipher) {
824 case WLAN_CIPHER_SUITE_WEP40:
825 case WLAN_CIPHER_SUITE_WEP104:
826 case WLAN_CIPHER_SUITE_TKIP:
827 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
828 break;
829 case WLAN_CIPHER_SUITE_CCMP:
830 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
831 break;
832 default:
833 WARN_ON(1);
834 goto err_out;
835 }
836 }
837
838 ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
839 if (ampdu) {
840 unsigned int density, factor;
841
842 if (unlikely(!sta || !cvif))
843 goto err_out;
844
845 factor = min_t(unsigned int, 1u,
846 info->control.sta->ht_cap.ampdu_factor);
847
848 density = info->control.sta->ht_cap.ampdu_density;
849
850 if (density) {
851 /*
852 * Watch out!
853 *
854 * Otus uses slightly different density values than
855 * those from the 802.11n spec.
856 */
857
858 density = max_t(unsigned int, density + 1, 7u);
859 }
860
861 SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
862 txc->s.ampdu_settings, density);
863
864 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
865 txc->s.ampdu_settings, factor);
866
867 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
868 txrate = &info->control.rates[i];
869 if (txrate->idx >= 0) {
870 txc->s.ri[i] =
871 CARL9170_TX_SUPER_RI_AMPDU;
872
873 if (WARN_ON(!(txrate->flags &
874 IEEE80211_TX_RC_MCS))) {
875 /*
876 * Not sure if it's even possible
877 * to aggregate non-ht rates with
878 * this HW.
879 */
880 goto err_out;
881 }
882 continue;
883 }
884
885 txrate->idx = 0;
886 txrate->count = ar->hw->max_rate_tries;
887 }
888
889 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
890 }
891
892 /*
893 * NOTE: For the first rate, the ERP & AMPDU flags are directly
894 * taken from mac_control. For all fallback rate, the firmware
895 * updates the mac_control flags from the rate info field.
896 */
897 for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
898 txrate = &info->control.rates[i];
899 if (txrate->idx < 0)
900 break;
901
902 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
903 txrate->count);
904
905 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
906 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
907 CARL9170_TX_SUPER_RI_ERP_PROT_S);
908 else if (carl9170_tx_cts_check(ar, txrate))
909 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
910 CARL9170_TX_SUPER_RI_ERP_PROT_S);
911
912 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
913 }
914
915 txrate = &info->control.rates[0];
916 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
917
918 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
919 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
920 else if (carl9170_tx_cts_check(ar, txrate))
921 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
922
923 txc->s.len = cpu_to_le16(skb->len);
924 txc->f.length = cpu_to_le16(len + FCS_LEN);
925 txc->f.mac_control = mac_tmp;
926 txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
927
928 arinfo = (void *)info->rate_driver_data;
929 arinfo->timeout = jiffies;
930 arinfo->ar = ar;
931 kref_init(&arinfo->ref);
932 return 0;
933
934err_out:
935 skb_pull(skb, sizeof(*txc));
936 return -EINVAL;
937}
938
939static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
940{
941 struct _carl9170_tx_superframe *super;
942
943 super = (void *) skb->data;
944 super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
945}
946
947static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
948{
949 struct _carl9170_tx_superframe *super;
950 int tmp;
951
952 super = (void *) skb->data;
953
954 tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
955 CARL9170_TX_SUPER_AMPDU_DENSITY_S;
956
957 /*
958 * If you haven't noticed carl9170_tx_prepare has already filled
959 * in all ampdu spacing & factor parameters.
960 * Now it's the time to check whenever the settings have to be
961 * updated by the firmware, or if everything is still the same.
962 *
963 * There's no sane way to handle different density values with
964 * this hardware, so we may as well just do the compare in the
965 * driver.
966 */
967
968 if (tmp != ar->current_density) {
969 ar->current_density = tmp;
970 super->s.ampdu_settings |=
971 CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
972 }
973
974 tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
975 CARL9170_TX_SUPER_AMPDU_FACTOR_S;
976
977 if (tmp != ar->current_factor) {
978 ar->current_factor = tmp;
979 super->s.ampdu_settings |=
980 CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
981 }
982}
983
984static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
985 struct sk_buff *_src)
986{
987 struct _carl9170_tx_superframe *dest, *src;
988
989 dest = (void *) _dest->data;
990 src = (void *) _src->data;
991
992 /*
993 * The mac80211 rate control algorithm expects that all MPDUs in
994 * an AMPDU share the same tx vectors.
995 * This is not really obvious right now, because the hardware
996 * does the AMPDU setup according to its own rulebook.
997 * Our nicely assembled, strictly monotonic increasing mpdu
998 * chains will be broken up, mashed back together...
999 */
1000
1001 return (dest->f.phy_control == src->f.phy_control);
1002}
1003
/*
 * A-MPDU scheduler: walks the active TIDs (round-robin via
 * ar->tx_ampdu_iter), pulls strictly in-sequence frames with matching
 * tx vectors off each TID queue and splices them onto the pending
 * queues for upload.
 */
static void carl9170_tx_ampdu(struct ar9170 *ar)
{
	struct sk_buff_head agg;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb, *first;
	unsigned int i = 0, done_ampdus = 0;
	u16 seq, queue, tmpssn;

	atomic_inc(&ar->tx_ampdu_scheduler);
	ar->tx_ampdu_schedule = false;

	/* an upload is already in flight; we'll get rescheduled */
	if (atomic_read(&ar->tx_ampdu_upload))
		return;

	if (!ar->tx_ampdu_list_len)
		return;

	__skb_queue_head_init(&agg);

	rcu_read_lock();
	tid_info = rcu_dereference(ar->tx_ampdu_iter);
	if (WARN_ON_ONCE(!tid_info)) {
		rcu_read_unlock();
		return;
	}

retry:
	/* resume the walk where the last scheduler run stopped */
	list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
		i++;

		if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
			continue;

		queue = TID_TO_WME_AC(tid_info->tid);

		spin_lock_bh(&tid_info->lock);
		if (tid_info->state != CARL9170_TID_STATE_XMIT)
			goto processed;

		tid_info->counter++;
		first = skb_peek(&tid_info->queue);
		tmpssn = carl9170_get_seq(first);
		seq = tid_info->snx;

		/* head of queue must be the next expected sequence,
		 * otherwise the TID has a hole and must idle */
		if (unlikely(tmpssn != seq)) {
			tid_info->state = CARL9170_TID_STATE_IDLE;

			goto processed;
		}

		while ((skb = skb_peek(&tid_info->queue))) {
			/* strict 0, 1, ..., n - 1, n frame sequence order */
			if (unlikely(carl9170_get_seq(skb) != seq))
				break;

			/* don't upload more than AMPDU FACTOR allows. */
			if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
			    (tid_info->max - 1)))
				break;

			/* all MPDUs in an aggregate must share tx vectors */
			if (!carl9170_tx_rate_check(ar, skb, first))
				break;

			atomic_inc(&ar->tx_ampdu_upload);
			tid_info->snx = seq = SEQ_NEXT(seq);
			__skb_unlink(skb, &tid_info->queue);

			__skb_queue_tail(&agg, skb);

			if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
				break;
		}

		if (skb_queue_empty(&tid_info->queue) ||
		    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
		    tid_info->snx) {
			/*
			 * stop TID, if A-MPDU frames are still missing,
			 * or whenever the queue is empty.
			 */

			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		done_ampdus++;

processed:
		spin_unlock_bh(&tid_info->lock);

		if (skb_queue_empty(&agg))
			continue;

		/* apply ampdu spacing & factor settings */
		carl9170_set_ampdu_params(ar, skb_peek(&agg));

		/* set aggregation push bit */
		carl9170_set_immba(ar, skb_peek_tail(&agg));

		spin_lock_bh(&ar->tx_pending[queue].lock);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_bh(&ar->tx_pending[queue].lock);
		ar->tx_schedule = true;
	}
	/* NOTE(review): if the continue-walk visited nothing (iterator
	 * was parked at the list end), retry once from the start */
	if ((done_ampdus++ == 0) && (i++ == 0))
		goto retry;

	/* remember where we stopped for the next scheduler run */
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();
}
1112
/*
 * Take the next frame off a pending queue, but only if the device has
 * enough free memory blocks for it. Returns NULL when the queue is
 * empty or device space is exhausted.
 */
static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
	struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	spin_lock_bh(&queue->lock);
	skb = skb_peek(queue);
	if (unlikely(!skb))
		goto err_unlock;

	/* reserve device memory before committing to the dequeue */
	if (carl9170_alloc_dev_space(ar, skb))
		goto err_unlock;

	__skb_unlink(skb, queue);
	spin_unlock_bh(&queue->lock);

	info = IEEE80211_SKB_CB(skb);
	arinfo = (void *) info->rate_driver_data;

	/* restart the stuck-queue timeout for this frame */
	arinfo->timeout = jiffies;

	/*
	 * increase ref count to "2".
	 * Ref counting is the easiest way to solve the race between
	 * the urb's completion routine: carl9170_tx_callback and
	 * wlan tx status functions: carl9170_tx_status/janitor.
	 */
	carl9170_tx_get_skb(skb);

	return skb;

err_unlock:
	spin_unlock_bh(&queue->lock);
	return NULL;
}
1152
1153void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
1154{
1155 struct _carl9170_tx_superframe *super;
1156 uint8_t q = 0;
1157
1158 ar->tx_dropped++;
1159
1160 super = (void *)skb->data;
1161 SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
1162 ar9170_qmap[carl9170_get_queue(ar, skb)]);
1163 __carl9170_tx_process_status(ar, super->s.cookie, q);
1164}
1165
/*
 * Main tx pump: drains the per-AC pending queues into the USB layer
 * for as long as device memory blocks are available, and arms the
 * janitor whenever frames were handed to the hardware.
 */
static void carl9170_tx(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned int i, q;
	bool schedule_garbagecollector = false;

	ar->tx_schedule = false;

	if (unlikely(!IS_STARTED(ar)))
		return;

	carl9170_usb_handle_tx_err(ar);

	for (i = 0; i < ar->hw->queues; i++) {
		while (!skb_queue_empty(&ar->tx_pending[i])) {
			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
			if (unlikely(!skb))
				break;

			atomic_inc(&ar->tx_total_pending);

			q = __carl9170_get_queue(ar, i);
			/*
			 * NB: tx_status[i] vs. tx_status[q],
			 * TODO: Move into pick_skb or alloc_dev_space.
			 */
			skb_queue_tail(&ar->tx_status[q], skb);

			carl9170_usb_tx(ar, skb);
			schedule_garbagecollector = true;
		}
	}

	if (!schedule_garbagecollector)
		return;

	/* frames are in flight: keep the watchdog running */
	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
		msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}
1205
/*
 * Insert an outgoing MPDU into its TID's (sequence-sorted) aggregation
 * queue. Returns true when the caller should kick the A-MPDU scheduler
 * (the TID just became ready to transmit). Frames outside the BlockAck
 * window are dropped and reported as failed.
 */
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
	struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *agg;
	struct sk_buff *iter;
	unsigned int max;
	u16 tid, seq, qseq, off;
	bool run = false;

	tid = carl9170_get_tid(skb);
	seq = carl9170_get_seq(skb);
	sta_info = (void *) sta->drv_priv;

	rcu_read_lock();
	agg = rcu_dereference(sta_info->agg[tid]);
	max = sta_info->ampdu_max_len;

	if (!agg)
		goto err_unlock_rcu;

	spin_lock_bh(&agg->lock);
	if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
		goto err_unlock;

	/* check if sequence is within the BA window */
	if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	/* mark the MPDU as outstanding; the bit is cleared again by
	 * carl9170_tx_shift_bm once its tx_status arrives */
	off = SEQ_DIFF(seq, agg->bsn);
	if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
		goto err_unlock;

	/* common case: new highest sequence, append at the tail */
	if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
		__skb_queue_tail(&agg->queue, skb);
		agg->hsn = seq;
		goto queued;
	}

	/* out-of-order frame: walk backwards to its sorted position */
	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = carl9170_get_seq(iter);

		if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
			__skb_queue_after(&agg->queue, iter, skb);
			goto queued;
		}
	}

	__skb_queue_head(&agg->queue, skb);
queued:

	/* the TID becomes transmittable once the head of its queue is
	 * the next expected sequence number */
	if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
		if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
			agg->state = CARL9170_TID_STATE_XMIT;
			run = true;
		}
	}

	spin_unlock_bh(&agg->lock);
	rcu_read_unlock();

	return run;

err_unlock:
	spin_unlock_bh(&agg->lock);

err_unlock_rcu:
	rcu_read_unlock();
	/* report the frame as failed — this also frees the tx state */
	carl9170_tx_status(ar, skb, false);
	ar->tx_dropped++;
	return false;
}
1281
/*
 * mac80211 tx entry point: prepares the firmware superframe header,
 * queues the frame (via the A-MPDU path when aggregated) and kicks the
 * tx pump. The skb is always consumed.
 */
int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_tx_info *info;
	struct ieee80211_sta *sta;
	bool run;

	if (unlikely(!IS_STARTED(ar)))
		goto err_free;

	info = IEEE80211_SKB_CB(skb);
	sta = info->control.sta;

	if (unlikely(carl9170_tx_prepare(ar, skb)))
		goto err_free;

	carl9170_tx_accounting(ar, skb);
	/*
	 * from now on, one has to use carl9170_tx_status to free
	 * all resources which are associated with the frame.
	 */

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		if (WARN_ON_ONCE(!sta))
			goto err_free;

		run = carl9170_tx_ampdu_queue(ar, sta, skb);
		if (run)
			carl9170_tx_ampdu(ar);

	} else {
		unsigned int queue = skb_get_queue_mapping(skb);

		skb_queue_tail(&ar->tx_pending[queue], skb);
	}

	carl9170_tx(ar);
	return NETDEV_TX_OK;

err_free:
	ar->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
1326
/*
 * Run the deferred tx work flagged by carl9170_tx_release: first the
 * A-MPDU scheduler, then the regular tx pump.
 */
void carl9170_tx_scheduler(struct ar9170 *ar)
{

	if (ar->tx_ampdu_schedule)
		carl9170_tx_ampdu(ar);

	if (ar->tx_schedule)
		carl9170_tx(ar);
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
new file mode 100644
index 000000000000..c7f6193934ea
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -0,0 +1,1136 @@
1/*
2 * Atheros CARL9170 driver
3 *
4 * USB - frontend
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40#include <linux/module.h>
41#include <linux/slab.h>
42#include <linux/usb.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/device.h>
46#include <net/mac80211.h>
47#include "carl9170.h"
48#include "cmd.h"
49#include "hw.h"
50#include "fwcmd.h"
51
52MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
53MODULE_AUTHOR("Christian Lamparter <chunkeey@googlemail.com>");
54MODULE_LICENSE("GPL");
55MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
56MODULE_FIRMWARE(CARL9170FW_NAME);
57MODULE_ALIAS("ar9170usb");
58MODULE_ALIAS("arusb_lnx");
59
60/*
61 * Note:
62 *
63 * Always update our wiki's device list (located at:
64 * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ),
65 * whenever you add a new device.
66 */
67static struct usb_device_id carl9170_usb_ids[] = {
68 /* Atheros 9170 */
69 { USB_DEVICE(0x0cf3, 0x9170) },
70 /* Atheros TG121N */
71 { USB_DEVICE(0x0cf3, 0x1001) },
72 /* TP-Link TL-WN821N v2 */
73 { USB_DEVICE(0x0cf3, 0x1002), .driver_info = CARL9170_WPS_BUTTON |
74 CARL9170_ONE_LED },
75 /* 3Com Dual Band 802.11n USB Adapter */
76 { USB_DEVICE(0x0cf3, 0x1010) },
77 /* H3C Dual Band 802.11n USB Adapter */
78 { USB_DEVICE(0x0cf3, 0x1011) },
79 /* Cace Airpcap NX */
80 { USB_DEVICE(0xcace, 0x0300) },
81 /* D-Link DWA 160 A1 */
82 { USB_DEVICE(0x07d1, 0x3c10) },
83 /* D-Link DWA 160 A2 */
84 { USB_DEVICE(0x07d1, 0x3a09) },
85 /* Netgear WNA1000 */
86 { USB_DEVICE(0x0846, 0x9040) },
87 /* Netgear WNDA3100 */
88 { USB_DEVICE(0x0846, 0x9010) },
89 /* Netgear WN111 v2 */
90 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
91 /* Zydas ZD1221 */
92 { USB_DEVICE(0x0ace, 0x1221) },
93 /* Proxim ORiNOCO 802.11n USB */
94 { USB_DEVICE(0x1435, 0x0804) },
95 /* WNC Generic 11n USB Dongle */
96 { USB_DEVICE(0x1435, 0x0326) },
97 /* ZyXEL NWD271N */
98 { USB_DEVICE(0x0586, 0x3417) },
99 /* Z-Com UB81 BG */
100 { USB_DEVICE(0x0cde, 0x0023) },
101 /* Z-Com UB82 ABG */
102 { USB_DEVICE(0x0cde, 0x0026) },
103 /* Sphairon Homelink 1202 */
104 { USB_DEVICE(0x0cde, 0x0027) },
105 /* Arcadyan WN7512 */
106 { USB_DEVICE(0x083a, 0xf522) },
107 /* Planex GWUS300 */
108 { USB_DEVICE(0x2019, 0x5304) },
109 /* IO-Data WNGDNUS2 */
110 { USB_DEVICE(0x04bb, 0x093f) },
111 /* NEC WL300NU-G */
112 { USB_DEVICE(0x0409, 0x0249) },
113 /* AVM FRITZ!WLAN USB Stick N */
114 { USB_DEVICE(0x057c, 0x8401) },
115 /* AVM FRITZ!WLAN USB Stick N 2.4 */
116 { USB_DEVICE(0x057c, 0x8402) },
117 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
118 { USB_DEVICE(0x1668, 0x1200) },
119
120 /* terminate */
121 {}
122};
123MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
124
125static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
126{
127 struct urb *urb;
128 int err;
129
130 if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS)
131 goto err_acc;
132
133 urb = usb_get_from_anchor(&ar->tx_wait);
134 if (!urb)
135 goto err_acc;
136
137 usb_anchor_urb(urb, &ar->tx_anch);
138
139 err = usb_submit_urb(urb, GFP_ATOMIC);
140 if (unlikely(err)) {
141 if (net_ratelimit()) {
142 dev_err(&ar->udev->dev, "tx submit failed (%d)\n",
143 urb->status);
144 }
145
146 usb_unanchor_urb(urb);
147 usb_anchor_urb(urb, &ar->tx_err);
148 }
149
150 usb_free_urb(urb);
151
152 if (likely(err == 0))
153 return;
154
155err_acc:
156 atomic_dec(&ar->tx_anch_urbs);
157}
158
/*
 * Completion handler for bulk tx (data) urbs - runs in hardirq context.
 *
 * Releases the in-flight slot taken by carl9170_usb_submit_data_urb,
 * then either completes the frame (urb->context is the frame's skb) or
 * parks it on the tx_err anchor so the tasklet can drop it safely.
 */
static void carl9170_usb_tx_data_complete(struct urb *urb)
{
	struct ar9170 *ar = (struct ar9170 *)
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));

	if (WARN_ON_ONCE(!ar)) {
		/* no driver context anymore; free the skb right here */
		dev_kfree_skb_irq(urb->context);
		return;
	}

	atomic_dec(&ar->tx_anch_urbs);

	switch (urb->status) {
	/* everything is fine */
	case 0:
		carl9170_tx_callback(ar, (void *)urb->context);
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		/*
		 * Defer the frame clean-up to the tasklet worker.
		 * This is necessary, because carl9170_tx_drop
		 * does not work in an irqsave context.
		 */
		usb_anchor_urb(urb, &ar->tx_err);
		return;

	/* a random transmission error has occurred? */
	default:
		if (net_ratelimit()) {
			dev_err(&ar->udev->dev, "tx failed (%d)\n",
				urb->status);
		}

		usb_anchor_urb(urb, &ar->tx_err);
		break;
	}

	/* the freed slot may allow another pending frame to go out */
	if (likely(IS_STARTED(ar)))
		carl9170_usb_submit_data_urb(ar);
}
204
/*
 * Push the next queued command urb to the device.
 *
 * Only one command may be in flight at a time; tx_cmd_urbs acts as the
 * lock - whoever raises it from 0 to 1 gets to submit. Returns 0 when
 * there is nothing to do (busy or queue empty), otherwise the
 * usb_submit_urb result.
 */
static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar)
{
	struct urb *urb;
	int err;

	if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) {
		/* another command is already in flight */
		atomic_dec(&ar->tx_cmd_urbs);
		return 0;
	}

	urb = usb_get_from_anchor(&ar->tx_cmd);
	if (!urb) {
		atomic_dec(&ar->tx_cmd_urbs);
		return 0;
	}

	usb_anchor_urb(urb, &ar->tx_anch);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		usb_unanchor_urb(urb);
		atomic_dec(&ar->tx_cmd_urbs);
	}
	/* drop the reference obtained from usb_get_from_anchor */
	usb_free_urb(urb);

	return err;
}
231
/*
 * Completion handler for the interrupt-out command urb.
 *
 * Releases the single-command-in-flight slot and, unless the device is
 * gone or uninitialized, submits the next queued command.
 */
static void carl9170_usb_cmd_complete(struct urb *urb)
{
	struct ar9170 *ar = urb->context;
	int err = 0;

	if (WARN_ON_ONCE(!ar))
		return;

	atomic_dec(&ar->tx_cmd_urbs);

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		return;

	default:
		err = urb->status;
		break;
	}

	if (!IS_INITIALIZED(ar))
		return;

	if (err)
		dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err);

	/* keep the command pipeline moving */
	err = carl9170_usb_submit_cmd_urb(ar);
	if (err)
		dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err);
}
269
/*
 * Completion handler for the interrupt-in endpoint.
 *
 * The device delivers command responses and events here; they are
 * parsed immediately and the urb is resubmitted, except on disconnect.
 */
static void carl9170_usb_rx_irq_complete(struct urb *urb)
{
	struct ar9170 *ar = urb->context;

	if (WARN_ON_ONCE(!ar))
		return;

	switch (urb->status) {
	/* everything is fine */
	case 0:
		break;

	/* disconnect */
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		return;

	default:
		/* skip the payload on transfer errors, but keep listening */
		goto resubmit;
	}

	carl9170_handle_command_response(ar, urb->transfer_buffer,
					 urb->actual_length);

resubmit:
	usb_anchor_urb(urb, &ar->rx_anch);
	if (unlikely(usb_submit_urb(urb, GFP_ATOMIC)))
		usb_unanchor_urb(urb);
}
301
/*
 * Top up the active rx urb set from the standby pool.
 *
 * Moves urbs from rx_pool onto rx_anch and submits them until either
 * AR9170_NUM_RX_URBS are in flight or the pool runs dry; the loop is
 * additionally bounded by AR9170_NUM_RX_URBS iterations. Returns the
 * result of the last submission attempt, or -ENOSPC when the pool had
 * no urb to offer.
 */
static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp)
{
	struct urb *urb;
	int err = 0, runs = 0;

	while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) &&
		(runs++ < AR9170_NUM_RX_URBS)) {
		err = -ENOSPC;
		urb = usb_get_from_anchor(&ar->rx_pool);
		if (urb) {
			usb_anchor_urb(urb, &ar->rx_anch);
			err = usb_submit_urb(urb, gfp);
			if (unlikely(err)) {
				/* return the urb to the standby pool */
				usb_unanchor_urb(urb);
				usb_anchor_urb(urb, &ar->rx_pool);
			} else {
				atomic_dec(&ar->rx_pool_urbs);
				atomic_inc(&ar->rx_anch_urbs);
			}
			usb_free_urb(urb);
		}
	}

	return err;
}
327
/*
 * Tasklet-context rx processing.
 *
 * Drains completed rx urbs from the rx_work anchor, feeds each payload
 * to the rx path, recycles the urb into the standby pool and tries to
 * rearm the active set. Bounded by the pool size so a single tasklet
 * run cannot monopolize the CPU.
 */
static void carl9170_usb_rx_work(struct ar9170 *ar)
{
	struct urb *urb;
	int i;

	for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
		urb = usb_get_from_anchor(&ar->rx_work);
		if (!urb)
			break;

		atomic_dec(&ar->rx_work_urbs);
		if (IS_INITIALIZED(ar)) {
			carl9170_rx(ar, urb->transfer_buffer,
				    urb->actual_length);
		}

		usb_anchor_urb(urb, &ar->rx_pool);
		atomic_inc(&ar->rx_pool_urbs);

		usb_free_urb(urb);

		carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
	}
}
352
353void carl9170_usb_handle_tx_err(struct ar9170 *ar)
354{
355 struct urb *urb;
356
357 while ((urb = usb_get_from_anchor(&ar->tx_err))) {
358 struct sk_buff *skb = (void *)urb->context;
359
360 carl9170_tx_drop(ar, skb);
361 carl9170_tx_callback(ar, skb);
362 usb_free_urb(urb);
363 }
364}
365
/* Bottom half for rx processing and deferred tx scheduling. */
static void carl9170_usb_tasklet(unsigned long data)
{
	struct ar9170 *ar = (struct ar9170 *) data;

	if (!IS_INITIALIZED(ar))
		return;

	carl9170_usb_rx_work(ar);

	/*
	 * The tx scheduler is not strictly a USB matter, but the rx
	 * worker just returned frames to the mac80211 stack, which
	 * makes this the ideal spot to line up the next transmissions.
	 */
	if (IS_STARTED(ar))
		carl9170_tx_scheduler(ar);
}
383
/*
 * Completion handler for bulk rx (data) urbs - hardirq context.
 *
 * Completed buffers are only parked on the rx_work anchor here; the
 * actual frame processing happens later in the tasklet.
 */
static void carl9170_usb_rx_complete(struct urb *urb)
{
	struct ar9170 *ar = (struct ar9170 *)urb->context;
	int err;

	if (WARN_ON_ONCE(!ar))
		return;

	atomic_dec(&ar->rx_anch_urbs);

	switch (urb->status) {
	case 0:
		/* rx path */
		usb_anchor_urb(urb, &ar->rx_work);
		atomic_inc(&ar->rx_work_urbs);
		break;

	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		/* handle disconnect events*/
		return;

	default:
		/* handle all other errors */
		usb_anchor_urb(urb, &ar->rx_pool);
		atomic_inc(&ar->rx_pool_urbs);
		break;
	}

	err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC);
	if (unlikely(err)) {
		/*
		 * usb_submit_rx_urb reported a problem.
		 * In case this is due to a rx buffer shortage,
		 * elevate the tasklet worker priority to
		 * the highest available level.
		 */
		tasklet_hi_schedule(&ar->usb_tasklet);

		if (atomic_read(&ar->rx_anch_urbs) == 0) {
			/*
			 * The system is too slow to cope with
			 * the enormous workload. We have simply
			 * run out of active rx urbs and this
			 * unfortunately leads to an unpredictable
			 * device.
			 */

			carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM);
		}
	} else {
		/*
		 * Using anything less than _high_ priority absolutely
		 * kills the rx performance on my UP-system...
		 */
		tasklet_hi_schedule(&ar->usb_tasklet);
	}
}
444
445static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp)
446{
447 struct urb *urb;
448 void *buf;
449
450 buf = kmalloc(ar->fw.rx_size, gfp);
451 if (!buf)
452 return NULL;
453
454 urb = usb_alloc_urb(0, gfp);
455 if (!urb) {
456 kfree(buf);
457 return NULL;
458 }
459
460 usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev,
461 AR9170_USB_EP_RX), buf, ar->fw.rx_size,
462 carl9170_usb_rx_complete, ar);
463
464 urb->transfer_flags |= URB_FREE_BUFFER;
465
466 return urb;
467}
468
/*
 * Allocate and submit the single interrupt-in urb that carries command
 * responses and device events. The buffer is owned by the urb
 * (URB_FREE_BUFFER) and the urb stays on the rx_anch anchor while
 * submitted.
 */
static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar)
{
	struct urb *urb = NULL;
	void *ibuf;
	int err = -ENOMEM;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		goto out;

	ibuf = kmalloc(AR9170_USB_EP_CTRL_MAX, GFP_KERNEL);
	if (!ibuf)
		goto out;

	usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev,
		AR9170_USB_EP_IRQ), ibuf, AR9170_USB_EP_CTRL_MAX,
		carl9170_usb_rx_irq_complete, ar, 1);

	urb->transfer_flags |= URB_FREE_BUFFER;

	usb_anchor_urb(urb, &ar->rx_anch);
	err = usb_submit_urb(urb, GFP_KERNEL);
	if (err)
		usb_unanchor_urb(urb);

out:
	/* drop the local reference; the anchor keeps the urb alive */
	usb_free_urb(urb);
	return err;
}
498
/*
 * Pre-allocate the rx urb pool and arm the first batch of bulk-in
 * urbs. On success the device state advances from STOPPED to IDLE
 * (waiting for firmware); on failure all rx urbs are reclaimed.
 */
static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar)
{
	struct urb *urb;
	int i, err = -EINVAL;

	/*
	 * The driver actively maintains a second shadow
	 * pool for inactive, but fully-prepared rx urbs.
	 *
	 * The pool should help the driver to master huge
	 * workload spikes without running the risk of
	 * undersupplying the hardware or wasting time by
	 * processing rx data (streams) inside the urb
	 * completion (hardirq context).
	 */
	for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) {
		urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL);
		if (!urb) {
			err = -ENOMEM;
			goto err_out;
		}

		usb_anchor_urb(urb, &ar->rx_pool);
		atomic_inc(&ar->rx_pool_urbs);
		usb_free_urb(urb);
	}

	err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL);
	if (err)
		goto err_out;

	/* the device now waiting for the firmware. */
	carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
	return 0;

err_out:

	usb_scuttle_anchored_urbs(&ar->rx_pool);
	usb_scuttle_anchored_urbs(&ar->rx_work);
	usb_kill_anchored_urbs(&ar->rx_anch);
	return err;
}
541
/*
 * Discard every not-yet-submitted tx frame and wait (up to one second
 * per anchor) for the command and data pipes to drain. Returns
 * -ETIMEDOUT if urbs were still in flight and had to be killed.
 */
static int carl9170_usb_flush(struct ar9170 *ar)
{
	struct urb *urb;
	int ret, err = 0;

	/* frames that never reached the device are simply dropped */
	while ((urb = usb_get_from_anchor(&ar->tx_wait))) {
		struct sk_buff *skb = (void *)urb->context;
		carl9170_tx_drop(ar, skb);
		carl9170_tx_callback(ar, skb);
		usb_free_urb(urb);
	}

	ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ);
	if (ret == 0)
		err = -ETIMEDOUT;

	/* lets wait a while until the tx - queues are dried out */
	ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ);
	if (ret == 0)
		err = -ETIMEDOUT;

	usb_kill_anchored_urbs(&ar->tx_anch);
	/* killed urbs were moved onto tx_err by their completion handler */
	carl9170_usb_handle_tx_err(ar);

	return err;
}
568
/*
 * Stop all USB traffic: flush and poison the tx/rx anchors, kill the
 * tasklet and throw away whatever was still queued. Afterwards the
 * device state is UNKNOWN and a full restart/reset is required.
 */
static void carl9170_usb_cancel_urbs(struct ar9170 *ar)
{
	int err;

	carl9170_set_state(ar, CARL9170_UNKNOWN_STATE);

	err = carl9170_usb_flush(ar);
	if (err)
		dev_err(&ar->udev->dev, "stuck tx urbs!\n");

	usb_poison_anchored_urbs(&ar->tx_anch);
	carl9170_usb_handle_tx_err(ar);
	usb_poison_anchored_urbs(&ar->rx_anch);

	tasklet_kill(&ar->usb_tasklet);

	usb_scuttle_anchored_urbs(&ar->rx_work);
	usb_scuttle_anchored_urbs(&ar->rx_pool);
	usb_scuttle_anchored_urbs(&ar->tx_cmd);
}
589
590int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
591 const bool free_buf)
592{
593 struct urb *urb;
594
595 if (!IS_INITIALIZED(ar))
596 return -EPERM;
597
598 if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
599 return -EINVAL;
600
601 urb = usb_alloc_urb(0, GFP_ATOMIC);
602 if (!urb)
603 return -ENOMEM;
604
605 usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
606 AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
607 carl9170_usb_cmd_complete, ar, 1);
608
609 if (free_buf)
610 urb->transfer_flags |= URB_FREE_BUFFER;
611
612 usb_anchor_urb(urb, &ar->tx_cmd);
613 usb_free_urb(urb);
614
615 return carl9170_usb_submit_cmd_urb(ar);
616}
617
/*
 * Send a firmware command and - unless CARL9170_CMD_ASYNC_FLAG is set -
 * sleep until the response has been copied into @out.
 *
 * @plen/@payload: command payload (may already live in ar->cmd.data)
 * @outlen/@out: expected response length and destination buffer
 *
 * A missing or wrong-sized response triggers a device restart.
 * Synchronous commands must not be issued from atomic context.
 */
int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
	unsigned int plen, void *payload, unsigned int outlen, void *out)
{
	int err = -ENOMEM;

	if (!IS_ACCEPTING_CMD(ar))
		return -EIO;

	if (!(cmd & CARL9170_CMD_ASYNC_FLAG))
		might_sleep();

	ar->cmd.hdr.len = plen;
	ar->cmd.hdr.cmd = cmd;
	/* writing multiple regs fills this buffer already */
	if (plen && payload != (u8 *)(ar->cmd.data))
		memcpy(ar->cmd.data, payload, plen);

	spin_lock_bh(&ar->cmd_lock);
	ar->readbuf = (u8 *)out;
	ar->readlen = outlen;
	spin_unlock_bh(&ar->cmd_lock);

	/*
	 * NOTE(review): a submit error here is not checked directly for
	 * synchronous commands; the completion timeout below catches a
	 * failed submission - confirm this is intentional.
	 */
	err = __carl9170_exec_cmd(ar, &ar->cmd, false);

	if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
		err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
		if (err == 0) {
			err = -ETIMEDOUT;
			goto err_unbuf;
		}

		/* the irq handler decrements readlen as it copies data */
		if (ar->readlen != outlen) {
			err = -EMSGSIZE;
			goto err_unbuf;
		}
	}

	return 0;

err_unbuf:
	/* Maybe the device was removed in the moment we were waiting? */
	if (IS_STARTED(ar)) {
		dev_err(&ar->udev->dev, "no command feedback "
			"received (%d).\n", err);

		/* provide some maybe useful debug information */
		print_hex_dump_bytes("carl9170 cmd: ", DUMP_PREFIX_NONE,
				     &ar->cmd, plen + 4);

		carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT);
	}

	/* invalidate to avoid completing the next command prematurely */
	spin_lock_bh(&ar->cmd_lock);
	ar->readbuf = NULL;
	ar->readlen = 0;
	spin_unlock_bh(&ar->cmd_lock);

	return err;
}
678
/*
 * Hand a fully prepared tx frame over to the USB layer.
 *
 * When the firmware uses tx streams, a stream header is written into
 * the skb headroom directly in front of skb->data (assumes the tx
 * preparation stage reserved that headroom - TODO confirm). The urb is
 * queued on tx_wait and goes out as soon as an in-flight slot opens.
 * On any failure the frame is reported back as dropped.
 */
void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb)
{
	struct urb *urb;
	struct ar9170_stream *tx_stream;
	void *data;
	unsigned int len;

	if (!IS_STARTED(ar))
		goto err_drop;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto err_drop;

	if (ar->fw.tx_stream) {
		tx_stream = (void *) (skb->data - sizeof(*tx_stream));

		len = skb->len + sizeof(*tx_stream);
		tx_stream->length = cpu_to_le16(len);
		tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG);
		data = tx_stream;
	} else {
		data = skb->data;
		len = skb->len;
	}

	usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev,
		AR9170_USB_EP_TX), data, len,
		carl9170_usb_tx_data_complete, skb);

	/* explicitly terminate transfers that are a multiple of the
	 * endpoint's packet size */
	urb->transfer_flags |= URB_ZERO_PACKET;

	usb_anchor_urb(urb, &ar->tx_wait);

	usb_free_urb(urb);

	carl9170_usb_submit_data_urb(ar);
	return;

err_drop:
	carl9170_tx_drop(ar, skb);
	carl9170_tx_callback(ar, skb);
}
722
723static void carl9170_release_firmware(struct ar9170 *ar)
724{
725 if (ar->fw.fw) {
726 release_firmware(ar->fw.fw);
727 memset(&ar->fw, 0, sizeof(ar->fw));
728 }
729}
730
/*
 * Quiesce the transmit side of the device.
 *
 * Flushes/poisons all tx urbs, aborts any sleeper waiting in
 * carl9170_exec_cmd and re-arms cmd_wait for the next start. The rx
 * side is deliberately left running (see the note at the bottom).
 */
void carl9170_usb_stop(struct ar9170 *ar)
{
	int ret;

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED);

	ret = carl9170_usb_flush(ar);
	if (ret)
		dev_err(&ar->udev->dev, "kill pending tx urbs.\n");

	usb_poison_anchored_urbs(&ar->tx_anch);
	carl9170_usb_handle_tx_err(ar);

	/* kill any pending command */
	spin_lock_bh(&ar->cmd_lock);
	ar->readlen = 0;
	spin_unlock_bh(&ar->cmd_lock);
	complete_all(&ar->cmd_wait);

	/* This is required to prevent an early completion on _start */
	INIT_COMPLETION(ar->cmd_wait);

	/*
	 * Note:
	 * So far we freed all tx urbs, but we won't dare to touch any rx urbs.
	 * Else we would end up with a unresponsive device...
	 */
}
759
760int carl9170_usb_open(struct ar9170 *ar)
761{
762 usb_unpoison_anchored_urbs(&ar->tx_anch);
763
764 carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE);
765 return 0;
766}
767
/*
 * Upload the firmware image in 4 KiB chunks via vendor control
 * requests (0x30), finalize with 0x31 ("FW DL COMPLETE") and wait for
 * the boot event from the freshly started firmware, then verify the
 * command pipe with an echo test.
 */
static int carl9170_usb_load_firmware(struct ar9170 *ar)
{
	const u8 *data;
	u8 *buf;
	unsigned int transfer;
	size_t len;
	u32 addr;
	int err = 0;

	buf = kmalloc(4096, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}

	data = ar->fw.fw->data;
	len = ar->fw.fw->size;
	addr = ar->fw.address;

	/* this removes the miniboot image */
	data += ar->fw.offset;
	len -= ar->fw.offset;

	while (len) {
		transfer = min_t(unsigned int, len, 4096u);
		memcpy(buf, data, transfer);

		err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
			0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
			addr >> 8, 0, buf, transfer, 100);

		if (err < 0) {
			kfree(buf);
			goto err_out;
		}

		len -= transfer;
		data += transfer;
		addr += transfer;
	}
	kfree(buf);

	/*
	 * NOTE(review): the result of this request is not checked; a
	 * failure would surface as the boot-wait timeout below - confirm
	 * this is intentional.
	 */
	err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0),
		0x31 /* FW DL COMPLETE */,
		0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200);

	if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) {
		err = -ETIMEDOUT;
		goto err_out;
	}

	err = carl9170_echo_test(ar, 0x4a110123);
	if (err)
		goto err_out;

	/* firmware restarts cmd counter */
	ar->cmd_seq = -1;

	return 0;

err_out:
	dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err);
	return err;
}
832
/*
 * Soft-restart the device: reboot the firmware, quiesce the tx side
 * and upload the image again. Only valid while the interface is still
 * bound. On failure all urbs are cancelled and the caller may need to
 * fall back to carl9170_usb_reset.
 */
int carl9170_usb_restart(struct ar9170 *ar)
{
	int err = 0;

	if (ar->intf->condition != USB_INTERFACE_BOUND)
		return 0;

	/* Disable command response sequence counter. */
	ar->cmd_seq = -2;

	err = carl9170_reboot(ar);

	carl9170_usb_stop(ar);

	if (err)
		goto err_out;

	tasklet_schedule(&ar->usb_tasklet);

	/* The reboot procedure can take quite a while to complete. */
	msleep(1100);

	err = carl9170_usb_open(ar);
	if (err)
		goto err_out;

	err = carl9170_usb_load_firmware(ar);
	if (err)
		goto err_out;

	return 0;

err_out:
	carl9170_usb_cancel_urbs(ar);
	return err;
}
869
870void carl9170_usb_reset(struct ar9170 *ar)
871{
872 /*
873 * This is the last resort to get the device going again
874 * without any *user replugging action*.
875 *
876 * But there is a catch: usb_reset really is like a physical
877 * *reconnect*. The mac80211 state will be lost in the process.
878 * Therefore a userspace application, which is monitoring
879 * the link must step in.
880 */
881 carl9170_usb_cancel_urbs(ar);
882
883 carl9170_usb_stop(ar);
884
885 usb_queue_reset_device(ar->intf);
886}
887
888static int carl9170_usb_init_device(struct ar9170 *ar)
889{
890 int err;
891
892 err = carl9170_usb_send_rx_irq_urb(ar);
893 if (err)
894 goto err_out;
895
896 err = carl9170_usb_init_rx_bulk_urbs(ar);
897 if (err)
898 goto err_unrx;
899
900 mutex_lock(&ar->mutex);
901 err = carl9170_usb_load_firmware(ar);
902 mutex_unlock(&ar->mutex);
903 if (err)
904 goto err_unrx;
905
906 return 0;
907
908err_unrx:
909 carl9170_usb_cancel_urbs(ar);
910
911err_out:
912 return err;
913}
914
/*
 * Firmware could not be found/parsed/uploaded: wake up a waiting
 * disconnect, detach the driver from the device and drop the extra
 * usb_device reference taken for the asynchronous firmware load.
 */
static void carl9170_usb_firmware_failed(struct ar9170 *ar)
{
	struct device *parent = ar->udev->dev.parent;
	struct usb_device *udev;

	/*
	 * Store a copy of the usb_device pointer locally.
	 * This is because device_release_driver initiates
	 * carl9170_usb_disconnect, which in turn frees our
	 * driver context (ar).
	 */
	udev = ar->udev;

	complete(&ar->fw_load_wait);

	/* unbind anything failed */
	if (parent)
		device_lock(parent);

	device_release_driver(&udev->dev);
	if (parent)
		device_unlock(parent);

	usb_put_dev(udev);
}
940
/*
 * Second half of probe, called once the firmware image is in memory:
 * parse it, initialize the device, do a trial open/stop cycle and
 * register with mac80211. Any failure unwinds through
 * carl9170_usb_firmware_failed, which unbinds the device.
 */
static void carl9170_usb_firmware_finish(struct ar9170 *ar)
{
	int err;

	err = carl9170_parse_firmware(ar);
	if (err)
		goto err_freefw;

	err = carl9170_usb_init_device(ar);
	if (err)
		goto err_freefw;

	err = carl9170_usb_open(ar);
	if (err)
		goto err_unrx;

	err = carl9170_register(ar);

	/* the device stays stopped until mac80211 calls op_start */
	carl9170_usb_stop(ar);
	if (err)
		goto err_unrx;

	/* release the firmware-load reference taken at probe time */
	complete(&ar->fw_load_wait);
	usb_put_dev(ar->udev);
	return;

err_unrx:
	carl9170_usb_cancel_urbs(ar);

err_freefw:
	carl9170_release_firmware(ar);
	carl9170_usb_firmware_failed(ar);
}
974
975static void carl9170_usb_firmware_step2(const struct firmware *fw,
976 void *context)
977{
978 struct ar9170 *ar = context;
979
980 if (fw) {
981 ar->fw.fw = fw;
982 carl9170_usb_firmware_finish(ar);
983 return;
984 }
985
986 dev_err(&ar->udev->dev, "firmware not found.\n");
987 carl9170_usb_firmware_failed(ar);
988}
989
990static int carl9170_usb_probe(struct usb_interface *intf,
991 const struct usb_device_id *id)
992{
993 struct ar9170 *ar;
994 struct usb_device *udev;
995 int err;
996
997 err = usb_reset_device(interface_to_usbdev(intf));
998 if (err)
999 return err;
1000
1001 ar = carl9170_alloc(sizeof(*ar));
1002 if (IS_ERR(ar))
1003 return PTR_ERR(ar);
1004
1005 udev = interface_to_usbdev(intf);
1006 usb_get_dev(udev);
1007 ar->udev = udev;
1008 ar->intf = intf;
1009 ar->features = id->driver_info;
1010
1011 usb_set_intfdata(intf, ar);
1012 SET_IEEE80211_DEV(ar->hw, &intf->dev);
1013
1014 init_usb_anchor(&ar->rx_anch);
1015 init_usb_anchor(&ar->rx_pool);
1016 init_usb_anchor(&ar->rx_work);
1017 init_usb_anchor(&ar->tx_wait);
1018 init_usb_anchor(&ar->tx_anch);
1019 init_usb_anchor(&ar->tx_cmd);
1020 init_usb_anchor(&ar->tx_err);
1021 init_completion(&ar->cmd_wait);
1022 init_completion(&ar->fw_boot_wait);
1023 init_completion(&ar->fw_load_wait);
1024 tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet,
1025 (unsigned long)ar);
1026
1027 atomic_set(&ar->tx_cmd_urbs, 0);
1028 atomic_set(&ar->tx_anch_urbs, 0);
1029 atomic_set(&ar->rx_work_urbs, 0);
1030 atomic_set(&ar->rx_anch_urbs, 0);
1031 atomic_set(&ar->rx_pool_urbs, 0);
1032 ar->cmd_seq = -2;
1033
1034 usb_get_dev(ar->udev);
1035
1036 carl9170_set_state(ar, CARL9170_STOPPED);
1037
1038 return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
1039 &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
1040}
1041
/*
 * USB disconnect: wait for the asynchronous firmware load to settle,
 * put the device to rest, unregister from mac80211 and release every
 * resource taken at probe time.
 */
static void carl9170_usb_disconnect(struct usb_interface *intf)
{
	struct ar9170 *ar = usb_get_intfdata(intf);
	struct usb_device *udev;

	if (WARN_ON(!ar))
		return;

	udev = ar->udev;
	/* firmware_step2/_finish must not race with the teardown below */
	wait_for_completion(&ar->fw_load_wait);

	if (IS_INITIALIZED(ar)) {
		/* reboot the firmware into a known (idle) state */
		carl9170_reboot(ar);
		carl9170_usb_stop(ar);
	}

	carl9170_usb_cancel_urbs(ar);
	carl9170_unregister(ar);

	usb_set_intfdata(intf, NULL);

	carl9170_release_firmware(ar);
	carl9170_free(ar);
	usb_put_dev(udev);
}
1067
1068#ifdef CONFIG_PM
1069static int carl9170_usb_suspend(struct usb_interface *intf,
1070 pm_message_t message)
1071{
1072 struct ar9170 *ar = usb_get_intfdata(intf);
1073
1074 if (!ar)
1075 return -ENODEV;
1076
1077 carl9170_usb_cancel_urbs(ar);
1078
1079 /*
1080 * firmware automatically reboots for usb suspend.
1081 */
1082
1083 return 0;
1084}
1085
1086static int carl9170_usb_resume(struct usb_interface *intf)
1087{
1088 struct ar9170 *ar = usb_get_intfdata(intf);
1089 int err;
1090
1091 if (!ar)
1092 return -ENODEV;
1093
1094 usb_unpoison_anchored_urbs(&ar->rx_anch);
1095
1096 err = carl9170_usb_init_device(ar);
1097 if (err)
1098 goto err_unrx;
1099
1100 err = carl9170_usb_open(ar);
1101 if (err)
1102 goto err_unrx;
1103
1104 return 0;
1105
1106err_unrx:
1107 carl9170_usb_cancel_urbs(ar);
1108
1109 return err;
1110}
1111#endif /* CONFIG_PM */
1112
/*
 * USB driver glue. soft_unbind keeps the device alive during
 * disconnect() so the firmware reboot command can still be sent.
 */
static struct usb_driver carl9170_driver = {
	.name = KBUILD_MODNAME,
	.probe = carl9170_usb_probe,
	.disconnect = carl9170_usb_disconnect,
	.id_table = carl9170_usb_ids,
	.soft_unbind = 1,
#ifdef CONFIG_PM
	.suspend = carl9170_usb_suspend,
	.resume = carl9170_usb_resume,
#endif /* CONFIG_PM */
};
1124
/* Module entry point: register the driver with the USB core. */
static int __init carl9170_usb_init(void)
{
	return usb_register(&carl9170_driver);
}

/* Module exit point: unregister the driver again. */
static void __exit carl9170_usb_exit(void)
{
	usb_deregister(&carl9170_driver);
}

module_init(carl9170_usb_init);
module_exit(carl9170_usb_exit);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
new file mode 100644
index 000000000000..ff53f078a0b5
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -0,0 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
/* Build date and git tag of the firmware release this driver targets. */
#define CARL9170FW_VERSION_YEAR 10
#define CARL9170FW_VERSION_MONTH 9
#define CARL9170FW_VERSION_DAY 28
#define CARL9170FW_VERSION_GIT "1.8.8.3"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
new file mode 100644
index 000000000000..24d63b583b6b
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -0,0 +1,420 @@
1/*
2 * Shared Atheros AR9170 Header
3 *
4 * RX/TX meta descriptor format
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, see
20 * http://www.gnu.org/licenses/.
21 *
22 * This file incorporates work covered by the following copyright and
23 * permission notice:
24 * Copyright (c) 2007-2008 Atheros Communications, Inc.
25 *
26 * Permission to use, copy, modify, and/or distribute this software for any
27 * purpose with or without fee is hereby granted, provided that the above
28 * copyright notice and this permission notice appear in all copies.
29 *
30 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
31 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
32 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
33 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
34 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */
38
39#ifndef __CARL9170_SHARED_WLAN_H
40#define __CARL9170_SHARED_WLAN_H
41
42#include "fwcmd.h"
43
44#define AR9170_RX_PHY_RATE_CCK_1M 0x0a
45#define AR9170_RX_PHY_RATE_CCK_2M 0x14
46#define AR9170_RX_PHY_RATE_CCK_5M 0x37
47#define AR9170_RX_PHY_RATE_CCK_11M 0x6e
48
49#define AR9170_ENC_ALG_NONE 0x0
50#define AR9170_ENC_ALG_WEP64 0x1
51#define AR9170_ENC_ALG_TKIP 0x2
52#define AR9170_ENC_ALG_AESCCMP 0x4
53#define AR9170_ENC_ALG_WEP128 0x5
54#define AR9170_ENC_ALG_WEP256 0x6
55#define AR9170_ENC_ALG_CENC 0x7
56
57#define AR9170_RX_ENC_SOFTWARE 0x8
58
59#define AR9170_RX_STATUS_MODULATION 0x03
60#define AR9170_RX_STATUS_MODULATION_S 0
61#define AR9170_RX_STATUS_MODULATION_CCK 0x00
62#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
63#define AR9170_RX_STATUS_MODULATION_HT 0x02
64#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
65
66/* depends on modulation */
67#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
68#define AR9170_RX_STATUS_GREENFIELD 0x08
69
70#define AR9170_RX_STATUS_MPDU 0x30
71#define AR9170_RX_STATUS_MPDU_S 4
72#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
73#define AR9170_RX_STATUS_MPDU_FIRST 0x20
74#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30
75#define AR9170_RX_STATUS_MPDU_LAST 0x10
76
77#define AR9170_RX_STATUS_CONT_AGGR 0x40
78#define AR9170_RX_STATUS_TOTAL_ERROR 0x80
79
80#define AR9170_RX_ERROR_RXTO 0x01
81#define AR9170_RX_ERROR_OVERRUN 0x02
82#define AR9170_RX_ERROR_DECRYPT 0x04
83#define AR9170_RX_ERROR_FCS 0x08
84#define AR9170_RX_ERROR_WRONG_RA 0x10
85#define AR9170_RX_ERROR_PLCP 0x20
86#define AR9170_RX_ERROR_MMIC 0x40
87
88/* these are either-or */
89#define AR9170_TX_MAC_PROT_RTS 0x0001
90#define AR9170_TX_MAC_PROT_CTS 0x0002
91#define AR9170_TX_MAC_PROT 0x0003
92
93#define AR9170_TX_MAC_NO_ACK 0x0004
94/* if unset, MAC will only do SIFS space before frame */
95#define AR9170_TX_MAC_BACKOFF 0x0008
96#define AR9170_TX_MAC_BURST 0x0010
97#define AR9170_TX_MAC_AGGR 0x0020
98
99/* encryption is a two-bit field */
100#define AR9170_TX_MAC_ENCR_NONE 0x0000
101#define AR9170_TX_MAC_ENCR_RC4 0x0040
102#define AR9170_TX_MAC_ENCR_CENC 0x0080
103#define AR9170_TX_MAC_ENCR_AES 0x00c0
104
105#define AR9170_TX_MAC_MMIC 0x0100
106#define AR9170_TX_MAC_HW_DURATION 0x0200
107#define AR9170_TX_MAC_QOS_S 10
108#define AR9170_TX_MAC_QOS 0x0c00
109#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
110#define AR9170_TX_MAC_TXOP_RIFS 0x2000
111#define AR9170_TX_MAC_IMM_BA 0x4000
112
113/* either-or */
114#define AR9170_TX_PHY_MOD_CCK 0x00000000
115#define AR9170_TX_PHY_MOD_OFDM 0x00000001
116#define AR9170_TX_PHY_MOD_HT 0x00000002
117
118/* depends on modulation */
119#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
120#define AR9170_TX_PHY_GREENFIELD 0x00000004
121
#define AR9170_TX_PHY_BW_S			3
/* Fix: the mask previously shifted by the non-existent macro
 * "AR9170_TX_PHY_BW_SHIFT"; the shift constant defined above is
 * AR9170_TX_PHY_BW_S, matching the *_S convention used throughout
 * this header. Any use of AR9170_TX_PHY_BW failed to compile before. */
#define AR9170_TX_PHY_BW			(3 << AR9170_TX_PHY_BW_S)
#define AR9170_TX_PHY_BW_20MHZ			0
#define AR9170_TX_PHY_BW_40MHZ			2
#define AR9170_TX_PHY_BW_40MHZ_DUP		3
127
128#define AR9170_TX_PHY_TX_HEAVY_CLIP_S 6
129#define AR9170_TX_PHY_TX_HEAVY_CLIP (7 << \
130 AR9170_TX_PHY_TX_HEAVY_CLIP_S)
131
132#define AR9170_TX_PHY_TX_PWR_S 9
133#define AR9170_TX_PHY_TX_PWR (0x3f << \
134 AR9170_TX_PHY_TX_PWR_S)
135
136#define AR9170_TX_PHY_TXCHAIN_S 15
137#define AR9170_TX_PHY_TXCHAIN (7 << \
138 AR9170_TX_PHY_TXCHAIN_S)
139#define AR9170_TX_PHY_TXCHAIN_1 1
140/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
141#define AR9170_TX_PHY_TXCHAIN_2 5
142
143#define AR9170_TX_PHY_MCS_S 18
144#define AR9170_TX_PHY_MCS (0x7f << \
145 AR9170_TX_PHY_MCS_S)
146
147#define AR9170_TX_PHY_RATE_CCK_1M 0x0
148#define AR9170_TX_PHY_RATE_CCK_2M 0x1
149#define AR9170_TX_PHY_RATE_CCK_5M 0x2
150#define AR9170_TX_PHY_RATE_CCK_11M 0x3
151
152/* same as AR9170_RX_PHY_RATE */
153#define AR9170_TXRX_PHY_RATE_OFDM_6M 0xb
154#define AR9170_TXRX_PHY_RATE_OFDM_9M 0xf
155#define AR9170_TXRX_PHY_RATE_OFDM_12M 0xa
156#define AR9170_TXRX_PHY_RATE_OFDM_18M 0xe
157#define AR9170_TXRX_PHY_RATE_OFDM_24M 0x9
158#define AR9170_TXRX_PHY_RATE_OFDM_36M 0xd
159#define AR9170_TXRX_PHY_RATE_OFDM_48M 0x8
160#define AR9170_TXRX_PHY_RATE_OFDM_54M 0xc
161
162#define AR9170_TXRX_PHY_RATE_HT_MCS0 0x0
163#define AR9170_TXRX_PHY_RATE_HT_MCS1 0x1
164#define AR9170_TXRX_PHY_RATE_HT_MCS2 0x2
165#define AR9170_TXRX_PHY_RATE_HT_MCS3 0x3
166#define AR9170_TXRX_PHY_RATE_HT_MCS4 0x4
167#define AR9170_TXRX_PHY_RATE_HT_MCS5 0x5
168#define AR9170_TXRX_PHY_RATE_HT_MCS6 0x6
169#define AR9170_TXRX_PHY_RATE_HT_MCS7 0x7
170#define AR9170_TXRX_PHY_RATE_HT_MCS8 0x8
171#define AR9170_TXRX_PHY_RATE_HT_MCS9 0x9
172#define AR9170_TXRX_PHY_RATE_HT_MCS10 0xa
173#define AR9170_TXRX_PHY_RATE_HT_MCS11 0xb
174#define AR9170_TXRX_PHY_RATE_HT_MCS12 0xc
175#define AR9170_TXRX_PHY_RATE_HT_MCS13 0xd
176#define AR9170_TXRX_PHY_RATE_HT_MCS14 0xe
177#define AR9170_TXRX_PHY_RATE_HT_MCS15 0xf
178
179#define AR9170_TX_PHY_SHORT_GI 0x80000000
180
181#ifdef __CARL9170FW__
182struct ar9170_tx_hw_mac_control {
183 union {
184 struct {
185 /*
186 * Beware of compiler bugs in all gcc pre 4.4!
187 */
188
189 u8 erp_prot:2;
190 u8 no_ack:1;
191 u8 backoff:1;
192 u8 burst:1;
193 u8 ampdu:1;
194
195 u8 enc_mode:2;
196
197 u8 hw_mmic:1;
198 u8 hw_duration:1;
199
200 u8 qos_queue:2;
201
202 u8 disable_txop:1;
203 u8 txop_rifs:1;
204
205 u8 ba_end:1;
206 u8 probe:1;
207 } __packed;
208
209 __le16 set;
210 } __packed;
211} __packed;
212
213struct ar9170_tx_hw_phy_control {
214 union {
215 struct {
216 /*
217 * Beware of compiler bugs in all gcc pre 4.4!
218 */
219
220 u8 modulation:2;
221 u8 preamble:1;
222 u8 bandwidth:2;
223 u8:1;
224 u8 heavy_clip:3;
225 u8 tx_power:6;
226 u8 chains:3;
227 u8 mcs:7;
228 u8:6;
229 u8 short_gi:1;
230 } __packed;
231
232 __le32 set;
233 } __packed;
234} __packed;
235
236struct ar9170_tx_rate_info {
237 u8 tries:3;
238 u8 erp_prot:2;
239 u8 ampdu:1;
240 u8 free:2; /* free for use (e.g.:RIFS/TXOP/AMPDU) */
241} __packed;
242
243struct carl9170_tx_superdesc {
244 __le16 len;
245 u8 rix;
246 u8 cnt;
247 u8 cookie;
248 u8 ampdu_density:3;
249 u8 ampdu_factor:2;
250 u8 ampdu_commit_density:1;
251 u8 ampdu_commit_factor:1;
252 u8 ampdu_unused_bit:1;
253 u8 queue:2;
254 u8 reserved:1;
255 u8 vif_id:3;
256 u8 fill_in_tsf:1;
257 u8 cab:1;
258 u8 padding2;
259 struct ar9170_tx_rate_info ri[CARL9170_TX_MAX_RATES];
260 struct ar9170_tx_hw_phy_control rr[CARL9170_TX_MAX_RETRY_RATES];
261} __packed;
262
263struct ar9170_tx_hwdesc {
264 __le16 length;
265 struct ar9170_tx_hw_mac_control mac;
266 struct ar9170_tx_hw_phy_control phy;
267} __packed;
268
269struct ar9170_tx_frame {
270 struct ar9170_tx_hwdesc hdr;
271
272 union {
273 struct ieee80211_hdr i3e;
274 u8 payload[0];
275 } data;
276} __packed;
277
278struct carl9170_tx_superframe {
279 struct carl9170_tx_superdesc s;
280 struct ar9170_tx_frame f;
281} __packed;
282
283#endif /* __CARL9170FW__ */
284
285struct _ar9170_tx_hwdesc {
286 __le16 length;
287 __le16 mac_control;
288 __le32 phy_control;
289} __packed;
290
291#define CARL9170_TX_SUPER_AMPDU_DENSITY_S 0
292#define CARL9170_TX_SUPER_AMPDU_DENSITY 0x7
293#define CARL9170_TX_SUPER_AMPDU_FACTOR 0x18
294#define CARL9170_TX_SUPER_AMPDU_FACTOR_S 3
295#define CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY 0x20
296#define CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY_S 5
297#define CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR 0x40
298#define CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR_S 6
299
300#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
301#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
302#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
303#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
304#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
305#define CARL9170_TX_SUPER_MISC_CAB 0x80
306
307#define CARL9170_TX_SUPER_RI_TRIES 0x7
308#define CARL9170_TX_SUPER_RI_TRIES_S 0
309#define CARL9170_TX_SUPER_RI_ERP_PROT 0x18
310#define CARL9170_TX_SUPER_RI_ERP_PROT_S 3
311#define CARL9170_TX_SUPER_RI_AMPDU 0x20
312#define CARL9170_TX_SUPER_RI_AMPDU_S 5
313
314struct _carl9170_tx_superdesc {
315 __le16 len;
316 u8 rix;
317 u8 cnt;
318 u8 cookie;
319 u8 ampdu_settings;
320 u8 misc;
321 u8 padding;
322 u8 ri[CARL9170_TX_MAX_RATES];
323 __le32 rr[CARL9170_TX_MAX_RETRY_RATES];
324} __packed;
325
326struct _carl9170_tx_superframe {
327 struct _carl9170_tx_superdesc s;
328 struct _ar9170_tx_hwdesc f;
329 u8 frame_data[0];
330} __packed;
331
332#define CARL9170_TX_SUPERDESC_LEN 24
333#define AR9170_TX_HWDESC_LEN 8
334#define CARL9170_TX_SUPERFRAME_LEN (CARL9170_TX_SUPERDESC_LEN + \
335 AR9170_TX_HWDESC_LEN)
336
337struct ar9170_rx_head {
338 u8 plcp[12];
339} __packed;
340
341#define AR9170_RX_HEAD_LEN 12
342
343struct ar9170_rx_phystatus {
344 union {
345 struct {
346 u8 rssi_ant0, rssi_ant1, rssi_ant2,
347 rssi_ant0x, rssi_ant1x, rssi_ant2x,
348 rssi_combined;
349 } __packed;
350 u8 rssi[7];
351 } __packed;
352
353 u8 evm_stream0[6], evm_stream1[6];
354 u8 phy_err;
355} __packed;
356
357#define AR9170_RX_PHYSTATUS_LEN 20
358
359struct ar9170_rx_macstatus {
360 u8 SAidx, DAidx;
361 u8 error;
362 u8 status;
363} __packed;
364
365#define AR9170_RX_MACSTATUS_LEN 4
366
367struct ar9170_rx_frame_single {
368 struct ar9170_rx_head phy_head;
369 struct ieee80211_hdr i3e;
370 struct ar9170_rx_phystatus phy_tail;
371 struct ar9170_rx_macstatus macstatus;
372} __packed;
373
374struct ar9170_rx_frame_head {
375 struct ar9170_rx_head phy_head;
376 struct ieee80211_hdr i3e;
377 struct ar9170_rx_macstatus macstatus;
378} __packed;
379
380struct ar9170_rx_frame_middle {
381 struct ieee80211_hdr i3e;
382 struct ar9170_rx_macstatus macstatus;
383} __packed;
384
385struct ar9170_rx_frame_tail {
386 struct ieee80211_hdr i3e;
387 struct ar9170_rx_phystatus phy_tail;
388 struct ar9170_rx_macstatus macstatus;
389} __packed;
390
391struct ar9170_rx_frame {
392 union {
393 struct ar9170_rx_frame_single single;
394 struct ar9170_rx_frame_head head;
395 struct ar9170_rx_frame_middle middle;
396 struct ar9170_rx_frame_tail tail;
397 } __packed;
398} __packed;
399
400static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
401{
402 return (t->SAidx & 0xc0) >> 4 |
403 (t->DAidx & 0xc0) >> 6;
404}
405
406enum ar9170_txq {
407 AR9170_TXQ_BE,
408
409 AR9170_TXQ_VI,
410 AR9170_TXQ_VO,
411 AR9170_TXQ_BK,
412
413 __AR9170_NUM_TXQ,
414};
415
416static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
417
418#define AR9170_TXQ_DEPTH 32
419
420#endif /* __CARL9170_SHARED_WLAN_H */
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index 53e77bd131b9..dacfb234f491 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -30,3 +30,32 @@ void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
30 va_end(args); 30 va_end(args);
31} 31}
32EXPORT_SYMBOL(ath_print); 32EXPORT_SYMBOL(ath_print);
33
34const char *ath_opmode_to_string(enum nl80211_iftype opmode)
35{
36 switch (opmode) {
37 case NL80211_IFTYPE_UNSPECIFIED:
38 return "UNSPEC";
39 case NL80211_IFTYPE_ADHOC:
40 return "ADHOC";
41 case NL80211_IFTYPE_STATION:
42 return "STATION";
43 case NL80211_IFTYPE_AP:
44 return "AP";
45 case NL80211_IFTYPE_AP_VLAN:
46 return "AP-VLAN";
47 case NL80211_IFTYPE_WDS:
48 return "WDS";
49 case NL80211_IFTYPE_MONITOR:
50 return "MONITOR";
51 case NL80211_IFTYPE_MESH_POINT:
52 return "MESH";
53 case NL80211_IFTYPE_P2P_CLIENT:
54 return "P2P-CLIENT";
55 case NL80211_IFTYPE_P2P_GO:
56 return "P2P-GO";
57 default:
58 return "UNKNOWN";
59 }
60}
61EXPORT_SYMBOL(ath_opmode_to_string);
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index fd3a020682dc..64e4af2c2887 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -77,4 +77,14 @@ ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
77} 77}
78#endif /* CONFIG_ATH_DEBUG */ 78#endif /* CONFIG_ATH_DEBUG */
79 79
80/** Returns string describing opmode, or NULL if unknown mode. */
81#ifdef CONFIG_ATH_DEBUG
82const char *ath_opmode_to_string(enum nl80211_iftype opmode);
83#else
84static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
85{
86 return "UNKNOWN";
87}
88#endif
89
80#endif /* ATH_DEBUG_H */ 90#endif /* ATH_DEBUG_H */
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index a8f81ea09f14..183c28281385 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -124,3 +124,62 @@ void ath_hw_setbssidmask(struct ath_common *common)
124 REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU); 124 REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
125} 125}
126EXPORT_SYMBOL(ath_hw_setbssidmask); 126EXPORT_SYMBOL(ath_hw_setbssidmask);
127
128
/**
 * ath_hw_cycle_counters_update - common function to update cycle counters
 *
 * @common: the ath_common struct for the device.
 *
 * This function is used to update all cycle counters in one place.
 * It has to be called while holding common->cc_lock!
 *
 * The counters are frozen, read, cleared and unfrozen in that order so
 * no cycles are lost or double counted between the read and the clear.
 */
void ath_hw_cycle_counters_update(struct ath_common *common)
{
	u32 cycles, busy, rx, tx;
	void *ah = common->ah;

	/* freeze */
	/* NOTE(review): in this file REG_WRITE takes (ah, value, register) -
	 * the reverse of key.c's helper; see the AR_BSSMSKU write above.
	 * Confirm against the local macro before reordering arguments. */
	REG_WRITE(ah, AR_MIBC_FMC, AR_MIBC);

	/* read */
	cycles = REG_READ(ah, AR_CCCNT);
	busy = REG_READ(ah, AR_RCCNT);
	rx = REG_READ(ah, AR_RFCNT);
	tx = REG_READ(ah, AR_TFCNT);

	/* clear */
	REG_WRITE(ah, 0, AR_CCCNT);
	REG_WRITE(ah, 0, AR_RFCNT);
	REG_WRITE(ah, 0, AR_RCCNT);
	REG_WRITE(ah, 0, AR_TFCNT);

	/* unfreeze */
	REG_WRITE(ah, 0, AR_MIBC);

	/* update all cycle counters here */
	common->cc_ani.cycles += cycles;
	common->cc_ani.rx_busy += busy;
	common->cc_ani.rx_frame += rx;
	common->cc_ani.tx_frame += tx;

	common->cc_survey.cycles += cycles;
	common->cc_survey.rx_busy += busy;
	common->cc_survey.rx_frame += rx;
	common->cc_survey.tx_frame += tx;
}
EXPORT_SYMBOL(ath_hw_cycle_counters_update);
172
/*
 * Return the "listen time" accumulated since the last call: cycles
 * spent neither receiving nor transmitting, scaled by the chip clock
 * (presumably milliseconds if clockrate is in MHz - confirm).
 * The ANI cycle counters are consumed and reset to zero.
 */
int32_t ath_hw_get_listen_time(struct ath_common *common)
{
	struct ath_cycle_counters *cc = &common->cc_ani;
	int32_t listen_time;

	listen_time = (cc->cycles - cc->rx_frame - cc->tx_frame) /
		      (common->clockrate * 1000);

	/* start the next ANI measurement window from zero */
	memset(cc, 0, sizeof(*cc));

	return listen_time;
}
EXPORT_SYMBOL(ath_hw_get_listen_time);
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
new file mode 100644
index 000000000000..bd21a4d82085
--- /dev/null
+++ b/drivers/net/wireless/ath/key.c
@@ -0,0 +1,568 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <asm/unaligned.h>
19#include <net/mac80211.h>
20
21#include "ath.h"
22#include "reg.h"
23#include "debug.h"
24
25#define REG_READ (common->ops->read)
26#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
27
28#define IEEE80211_WEP_NKID 4 /* number of key ids */
29
30/************************/
31/* Key Cache Management */
32/************************/
33
34bool ath_hw_keyreset(struct ath_common *common, u16 entry)
35{
36 u32 keyType;
37 void *ah = common->ah;
38
39 if (entry >= common->keymax) {
40 ath_print(common, ATH_DBG_FATAL,
41 "keychache entry %u out of range\n", entry);
42 return false;
43 }
44
45 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
46
47 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
48 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
49 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
50 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
51 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
52 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
53 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
54 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
55
56 if (keyType == AR_KEYTABLE_TYPE_TKIP) {
57 u16 micentry = entry + 64;
58
59 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
60 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
61 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
62 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
63
64 }
65
66 return true;
67}
68EXPORT_SYMBOL(ath_hw_keyreset);
69
70bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
71{
72 u32 macHi, macLo;
73 u32 unicast_flag = AR_KEYTABLE_VALID;
74 void *ah = common->ah;
75
76 if (entry >= common->keymax) {
77 ath_print(common, ATH_DBG_FATAL,
78 "keychache entry %u out of range\n", entry);
79 return false;
80 }
81
82 if (mac != NULL) {
83 /*
84 * AR_KEYTABLE_VALID indicates that the address is a unicast
85 * address, which must match the transmitter address for
86 * decrypting frames.
87 * Not setting this bit allows the hardware to use the key
88 * for multicast frame decryption.
89 */
90 if (mac[0] & 0x01)
91 unicast_flag = 0;
92
93 macHi = (mac[5] << 8) | mac[4];
94 macLo = (mac[3] << 24) |
95 (mac[2] << 16) |
96 (mac[1] << 8) |
97 mac[0];
98 macLo >>= 1;
99 macLo |= (macHi & 1) << 31;
100 macHi >>= 1;
101 } else {
102 macLo = macHi = 0;
103 }
104 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
105 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | unicast_flag);
106
107 return true;
108}
109
/*
 * Program one hardware key cache entry.
 *
 * @entry: key cache index (0 .. keymax-1; TKIP also claims entry + 64)
 * @k:     key material, length and cipher type
 * @mac:   peer MAC address, or NULL for a group/default key
 *
 * Returns true on success, false on range or cipher errors.
 *
 * The register write ORDER below is mandated by the hardware (paired
 * 32-bit writes); do not reorder.
 */
bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
			       const struct ath_keyval *k,
			       const u8 *mac)
{
	void *ah = common->ah;
	u32 key0, key1, key2, key3, key4;
	u32 keyType;

	if (entry >= common->keymax) {
		ath_print(common, ATH_DBG_FATAL,
			  "keycache entry %u out of range\n", entry);
		return false;
	}

	/* Map the generic cipher onto the hardware key table type. */
	switch (k->kv_type) {
	case ATH_CIPHER_AES_OCB:
		keyType = AR_KEYTABLE_TYPE_AES;
		break;
	case ATH_CIPHER_AES_CCM:
		if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
			ath_print(common, ATH_DBG_ANY,
				  "AES-CCM not supported by this mac rev\n");
			return false;
		}
		keyType = AR_KEYTABLE_TYPE_CCM;
		break;
	case ATH_CIPHER_TKIP:
		keyType = AR_KEYTABLE_TYPE_TKIP;
		/* TKIP needs the companion MIC entry at entry + 64 */
		if (entry + 64 >= common->keymax) {
			ath_print(common, ATH_DBG_ANY,
				  "entry %u inappropriate for TKIP\n", entry);
			return false;
		}
		break;
	case ATH_CIPHER_WEP:
		if (k->kv_len < WLAN_KEY_LEN_WEP40) {
			ath_print(common, ATH_DBG_ANY,
				  "WEP key length %u too small\n", k->kv_len);
			return false;
		}
		if (k->kv_len <= WLAN_KEY_LEN_WEP40)
			keyType = AR_KEYTABLE_TYPE_40;
		else if (k->kv_len <= WLAN_KEY_LEN_WEP104)
			keyType = AR_KEYTABLE_TYPE_104;
		else
			keyType = AR_KEYTABLE_TYPE_128;
		break;
	case ATH_CIPHER_CLR:
		keyType = AR_KEYTABLE_TYPE_CLR;
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "cipher %u not supported\n", k->kv_type);
		return false;
	}

	/* Split the key over the five key registers; KEY1/KEY3 carry
	 * only 16 bits each (hence the le16 reads at odd offsets). */
	key0 = get_unaligned_le32(k->kv_val + 0);
	key1 = get_unaligned_le16(k->kv_val + 4);
	key2 = get_unaligned_le32(k->kv_val + 6);
	key3 = get_unaligned_le16(k->kv_val + 10);
	key4 = get_unaligned_le32(k->kv_val + 12);
	if (k->kv_len <= WLAN_KEY_LEN_WEP104)
		key4 &= 0xff;

	/*
	 * Note: Key cache registers access special memory area that requires
	 * two 32-bit writes to actually update the values in the internal
	 * memory. Consequently, the exact order and pairs used here must be
	 * maintained.
	 */

	if (keyType == AR_KEYTABLE_TYPE_TKIP) {
		u16 micentry = entry + 64;

		/*
		 * Write inverted key[47:0] first to avoid Michael MIC errors
		 * on frames that could be sent or received at the same time.
		 * The correct key will be written in the end once everything
		 * else is ready.
		 */
		REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);

		/* Write key[95:48] */
		REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
		REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);

		/* Write key[127:96] and key type */
		REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
		REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);

		/* Write MAC address for the entry */
		(void) ath_hw_keysetmac(common, entry, mac);

		if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
			/*
			 * TKIP uses two key cache entries:
			 * Michael MIC TX/RX keys in the same key cache entry
			 * (idx = main index + 64):
			 * key0 [31:0] = RX key [31:0]
			 * key1 [15:0] = TX key [31:16]
			 * key1 [31:16] = reserved
			 * key2 [31:0] = RX key [63:32]
			 * key3 [15:0] = TX key [15:0]
			 * key3 [31:16] = reserved
			 * key4 [31:0] = TX key [63:32]
			 */
			u32 mic0, mic1, mic2, mic3, mic4;

			mic0 = get_unaligned_le32(k->kv_mic + 0);
			mic2 = get_unaligned_le32(k->kv_mic + 4);
			mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
			mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
			mic4 = get_unaligned_le32(k->kv_txmic + 4);

			/* Write RX[31:0] and TX[31:16] */
			REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
			REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);

			/* Write RX[63:32] and TX[15:0] */
			REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
			REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);

			/* Write TX[63:32] and keyType(reserved) */
			REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
			REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
				  AR_KEYTABLE_TYPE_CLR);

		} else {
			/*
			 * TKIP uses four key cache entries (two for group
			 * keys):
			 * Michael MIC TX/RX keys are in different key cache
			 * entries (idx = main index + 64 for TX and
			 * main index + 32 + 96 for RX):
			 * key0 [31:0] = TX/RX MIC key [31:0]
			 * key1 [31:0] = reserved
			 * key2 [31:0] = TX/RX MIC key [63:32]
			 * key3 [31:0] = reserved
			 * key4 [31:0] = reserved
			 *
			 * Upper layer code will call this function separately
			 * for TX and RX keys when these registers offsets are
			 * used.
			 */
			u32 mic0, mic2;

			mic0 = get_unaligned_le32(k->kv_mic + 0);
			mic2 = get_unaligned_le32(k->kv_mic + 4);

			/* Write MIC key[31:0] */
			REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
			REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);

			/* Write MIC key[63:32] */
			REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
			REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);

			/* Write TX[63:32] and keyType(reserved) */
			REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
			REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
				  AR_KEYTABLE_TYPE_CLR);
		}

		/* MAC address registers are reserved for the MIC entry */
		REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
		REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);

		/*
		 * Write the correct (un-inverted) key[47:0] last to enable
		 * TKIP now that all other registers are set with correct
		 * values.
		 */
		REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
	} else {
		/* Write key[47:0] */
		REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
		REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);

		/* Write key[95:48] */
		REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
		REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);

		/* Write key[127:96] and key type */
		REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
		REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);

		/* Write MAC address for the entry */
		(void) ath_hw_keysetmac(common, entry, mac);
	}

	return true;
}
304
/*
 * Install a TKIP key together with its Michael MIC halves.
 *
 * @keyix:         main key cache index
 * @key:           802.11 key material (key | TX MIC | RX MIC layout)
 * @hk:            hw key descriptor with kv_val/kv_len already filled in
 * @addr:          peer address, or NULL for a group key
 * @authenticator: true when we are the authenticator side, which
 *                 selects the TX MIC for group keys
 *
 * Returns the result of ath_hw_set_keycache_entry() (nonzero on
 * success), or 0 on failure.
 */
static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
			   struct ath_keyval *hk, const u8 *addr,
			   bool authenticator)
{
	const u8 *key_rxmic;
	const u8 *key_txmic;

	key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
	key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;

	if (addr == NULL) {
		/*
		 * Group key installation - only two key cache entries are used
		 * regardless of splitmic capability since group key is only
		 * used either for TX or RX.
		 */
		/* NOTE(review): the kv_txmic copies below are sized with
		 * sizeof(hk->kv_mic); correct only if kv_mic and kv_txmic
		 * have the same size - confirm in struct ath_keyval. */
		if (authenticator) {
			memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
		} else {
			memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
			memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
		}
		return ath_hw_set_keycache_entry(common, keyix, hk, addr);
	}
	if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
		/* TX and RX keys share the same key cache entry. */
		memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
		memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
		return ath_hw_set_keycache_entry(common, keyix, hk, addr);
	}

	/* Separate key cache entries for TX and RX */

	/* TX key goes at first index, RX key at +32. */
	memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
	if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
		/* TX MIC entry failed. No need to proceed further */
		ath_print(common, ATH_DBG_FATAL,
			  "Setting TX MIC Key Failed\n");
		return 0;
	}

	memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
	/* XXX delete tx key on failure? */
	return ath_hw_set_keycache_entry(common, keyix + 32, hk, addr);
}
352
353static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
354{
355 int i;
356
357 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
358 if (test_bit(i, common->keymap) ||
359 test_bit(i + 64, common->keymap))
360 continue; /* At least one part of TKIP key allocated */
361 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) &&
362 (test_bit(i + 32, common->keymap) ||
363 test_bit(i + 64 + 32, common->keymap)))
364 continue; /* At least one part of TKIP key allocated */
365
366 /* Found a free slot for a TKIP key */
367 return i;
368 }
369 return -1;
370}
371
/*
 * Pick a free key cache slot for a key of the given cipher (TKIP is
 * delegated to ath_reserve_key_cache_slot_tkip()).
 *
 * Slots whose TKIP companions are already occupied are preferred, and
 * slots a future TKIP group key would need are avoided, so TKIP
 * allocation is not starved. Returns the slot index, or -1 when full.
 */
static int ath_reserve_key_cache_slot(struct ath_common *common,
				      u32 cipher)
{
	int i;

	if (cipher == WLAN_CIPHER_SUITE_TKIP)
		return ath_reserve_key_cache_slot_tkip(common);

	/* First, try to find slots that would not be available for TKIP. */
	if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
		for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
			/* a slot is unusable for TKIP when any other member
			 * of its 4-slot group is already taken; hand out the
			 * free member of such a group */
			if (!test_bit(i, common->keymap) &&
			    (test_bit(i + 32, common->keymap) ||
			     test_bit(i + 64, common->keymap) ||
			     test_bit(i + 64 + 32, common->keymap)))
				return i;
			if (!test_bit(i + 32, common->keymap) &&
			    (test_bit(i, common->keymap) ||
			     test_bit(i + 64, common->keymap) ||
			     test_bit(i + 64 + 32, common->keymap)))
				return i + 32;
			if (!test_bit(i + 64, common->keymap) &&
			    (test_bit(i, common->keymap) ||
			     test_bit(i + 32, common->keymap) ||
			     test_bit(i + 64 + 32, common->keymap)))
				return i + 64;
			if (!test_bit(i + 64 + 32, common->keymap) &&
			    (test_bit(i, common->keymap) ||
			     test_bit(i + 32, common->keymap) ||
			     test_bit(i + 64, common->keymap)))
				return i + 64 + 32;
		}
	} else {
		/* combined MIC: only the +64 companion matters */
		for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
			if (!test_bit(i, common->keymap) &&
			    test_bit(i + 64, common->keymap))
				return i;
			if (test_bit(i, common->keymap) &&
			    !test_bit(i + 64, common->keymap))
				return i + 64;
		}
	}

	/* No partially used TKIP slots, pick any available slot */
	for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
		/* Do not allow slots that could be needed for TKIP group keys
		 * to be used. This limitation could be removed if we know that
		 * TKIP will not be used. */
		if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
			continue;
		if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
			if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
				continue;
			if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
				continue;
		}

		if (!test_bit(i, common->keymap))
			return i; /* Found a free slot for a key */
	}

	/* No free slot found */
	return -1;
}
436
/*
 * Configure encryption in the HW.
 *
 * Translates a mac80211 key request into a key cache programming:
 * maps the cipher, chooses the slot index (allocated for pairwise and
 * AP/IBSS group keys, key->keyidx otherwise), installs the key, and
 * records the used slots in common->keymap / tkip_keymap.
 *
 * Returns the hardware key index on success, -EOPNOTSUPP for an
 * unsupported cipher or missing sta, -ENOSPC when the cache is full,
 * or -EIO if the hardware programming failed.
 */
int ath_key_config(struct ath_common *common,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct ath_keyval hk;
	const u8 *mac = NULL;
	u8 gmac[ETH_ALEN];
	int ret = 0;
	int idx;

	memset(&hk, 0, sizeof(hk));

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		hk.kv_type = ATH_CIPHER_WEP;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		hk.kv_type = ATH_CIPHER_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		hk.kv_type = ATH_CIPHER_AES_CCM;
		break;
	default:
		return -EOPNOTSUPP;
	}

	hk.kv_len = key->keylen;
	memcpy(hk.kv_val, key->key, key->keylen);

	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		/* Group key: match on the multicast variant of the
		 * vif/sta address (multicast bit forced on). */
		switch (vif->type) {
		case NL80211_IFTYPE_AP:
			memcpy(gmac, vif->addr, ETH_ALEN);
			gmac[0] |= 0x01;	/* set multicast bit */
			mac = gmac;
			idx = ath_reserve_key_cache_slot(common, key->cipher);
			break;
		case NL80211_IFTYPE_ADHOC:
			if (!sta) {
				idx = key->keyidx;
				break;
			}
			memcpy(gmac, sta->addr, ETH_ALEN);
			gmac[0] |= 0x01;	/* set multicast bit */
			mac = gmac;
			idx = ath_reserve_key_cache_slot(common, key->cipher);
			break;
		default:
			idx = key->keyidx;
			break;
		}
	} else if (key->keyidx) {
		if (WARN_ON(!sta))
			return -EOPNOTSUPP;
		mac = sta->addr;

		if (vif->type != NL80211_IFTYPE_AP) {
			/* Only keyidx 0 should be used with unicast key, but
			 * allow this for client mode for now. */
			idx = key->keyidx;
		} else
			return -EIO;
	} else {
		/* pairwise key with keyidx 0: the normal case */
		if (WARN_ON(!sta))
			return -EOPNOTSUPP;
		mac = sta->addr;

		idx = ath_reserve_key_cache_slot(common, key->cipher);
	}

	if (idx < 0)
		return -ENOSPC; /* no free key cache entries */

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
		ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
				      vif->type == NL80211_IFTYPE_AP);
	else
		ret = ath_hw_set_keycache_entry(common, idx, &hk, mac);

	if (!ret)
		return -EIO;

	/* mark every slot the key occupies so allocation and deletion
	 * can find them later */
	set_bit(idx, common->keymap);
	if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		set_bit(idx + 64, common->keymap);
		set_bit(idx, common->tkip_keymap);
		set_bit(idx + 64, common->tkip_keymap);
		if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
			set_bit(idx + 32, common->keymap);
			set_bit(idx + 64 + 32, common->keymap);
			set_bit(idx + 32, common->tkip_keymap);
			set_bit(idx + 64 + 32, common->tkip_keymap);
		}
	}

	return idx;
}
EXPORT_SYMBOL(ath_key_config);
540
/*
 * Delete Key.
 *
 * Resets the hardware entry and clears every keymap/tkip_keymap bit
 * that ath_key_config() set for this key. Slots below
 * IEEE80211_WEP_NKID are reserved default-key entries: they are reset
 * but never returned to the allocator.
 */
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
{
	ath_hw_keyreset(common, key->hw_key_idx);
	if (key->hw_key_idx < IEEE80211_WEP_NKID)
		return;

	clear_bit(key->hw_key_idx, common->keymap);
	if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
		return;

	/* TKIP: release the +64 MIC companion entry as well */
	clear_bit(key->hw_key_idx + 64, common->keymap);

	clear_bit(key->hw_key_idx, common->tkip_keymap);
	clear_bit(key->hw_key_idx + 64, common->tkip_keymap);

	if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
		/* split-MIC hardware also used the +32 slot pair */
		ath_hw_keyreset(common, key->hw_key_idx + 32);
		clear_bit(key->hw_key_idx + 32, common->keymap);
		clear_bit(key->hw_key_idx + 64 + 32, common->keymap);

		clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
		clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
	}
}
EXPORT_SYMBOL(ath_key_delete);
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
index dfe1fbec24f5..298e53f3fa48 100644
--- a/drivers/net/wireless/ath/reg.h
+++ b/drivers/net/wireless/ath/reg.h
@@ -17,6 +17,12 @@
17#ifndef ATH_REGISTERS_H 17#ifndef ATH_REGISTERS_H
18#define ATH_REGISTERS_H 18#define ATH_REGISTERS_H
19 19
20#define AR_MIBC 0x0040
21#define AR_MIBC_COW 0x00000001
22#define AR_MIBC_FMC 0x00000002
23#define AR_MIBC_CMC 0x00000004
24#define AR_MIBC_MCS 0x00000008
25
20/* 26/*
21 * BSSID mask registers. See ath_hw_set_bssid_mask() 27 * BSSID mask registers. See ath_hw_set_bssid_mask()
22 * for detailed documentation about these registers. 28 * for detailed documentation about these registers.
@@ -24,4 +30,32 @@
24#define AR_BSSMSKL 0x80e0 30#define AR_BSSMSKL 0x80e0
25#define AR_BSSMSKU 0x80e4 31#define AR_BSSMSKU 0x80e4
26 32
33#define AR_TFCNT 0x80ec
34#define AR_RFCNT 0x80f0
35#define AR_RCCNT 0x80f4
36#define AR_CCCNT 0x80f8
37
38#define AR_KEYTABLE_0 0x8800
39#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
40#define AR_KEY_CACHE_SIZE 128
41#define AR_RSVD_KEYTABLE_ENTRIES 4
42#define AR_KEY_TYPE 0x00000007
43#define AR_KEYTABLE_TYPE_40 0x00000000
44#define AR_KEYTABLE_TYPE_104 0x00000001
45#define AR_KEYTABLE_TYPE_128 0x00000003
46#define AR_KEYTABLE_TYPE_TKIP 0x00000004
47#define AR_KEYTABLE_TYPE_AES 0x00000005
48#define AR_KEYTABLE_TYPE_CCM 0x00000006
49#define AR_KEYTABLE_TYPE_CLR 0x00000007
50#define AR_KEYTABLE_ANT 0x00000008
51#define AR_KEYTABLE_VALID 0x00008000
52#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
53#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
54#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
55#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
56#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
57#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
58#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
59#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
60
27#endif /* ATH_REGISTERS_H */ 61#endif /* ATH_REGISTERS_H */
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 5e83b6f0a3a0..69d4af09a6cb 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,6 +1,8 @@
1b43-y += main.o 1b43-y += main.o
2b43-y += tables.o 2b43-y += tables.o
3b43-$(CONFIG_B43_NPHY) += tables_nphy.o 3b43-$(CONFIG_B43_NPHY) += tables_nphy.o
4b43-$(CONFIG_B43_NPHY) += radio_2055.o
5b43-$(CONFIG_B43_NPHY) += radio_2056.o
4b43-y += phy_common.o 6b43-y += phy_common.o
5b43-y += phy_g.o 7b43-y += phy_g.o
6b43-y += phy_a.o 8b43-y += phy_a.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 8674a99356af..72821c456b02 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -186,7 +186,8 @@ enum {
186#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ 186#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */
187#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */ 187#define B43_SHM_SH_RFRXSP1 0x0072 /* RF RX SP Register 1 */
188#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ 188#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */
189#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ 189#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5 Ghz channel */
190#define B43_SHM_SH_CHAN_40MHZ 0x0200 /* Bit set, if 40 Mhz channel width */
190#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ 191#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */
191/* TSSI information */ 192/* TSSI information */
192#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */ 193#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 8f7d7eff2d80..7b2ea6781457 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -294,8 +294,10 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
294 */ 294 */
295 channelcookie = new_channel; 295 channelcookie = new_channel;
296 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 296 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
297 channelcookie |= 0x100; 297 channelcookie |= B43_SHM_SH_CHAN_5GHZ;
298 //FIXME set 40Mhz flag if required 298 /* FIXME: set 40Mhz flag if required */
299 if (0)
300 channelcookie |= B43_SHM_SH_CHAN_40MHZ;
299 savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN); 301 savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN);
300 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie); 302 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie);
301 303
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index bd480b481bfc..0e6194228845 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -2,6 +2,7 @@
2#define LINUX_B43_PHY_COMMON_H_ 2#define LINUX_B43_PHY_COMMON_H_
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/nl80211.h>
5 6
6struct b43_wldev; 7struct b43_wldev;
7 8
@@ -250,8 +251,10 @@ struct b43_phy {
250 * check is needed. */ 251 * check is needed. */
251 unsigned long next_txpwr_check_time; 252 unsigned long next_txpwr_check_time;
252 253
253 /* current channel */ 254 /* Current channel */
254 unsigned int channel; 255 unsigned int channel;
256 u16 channel_freq;
257 enum nl80211_channel_type channel_type;
255 258
256 /* PHY TX errors counter. */ 259 /* PHY TX errors counter. */
257 atomic_t txerr_cnt; 260 atomic_t txerr_cnt;
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 2466c0a52e5d..dfec5496055e 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -29,6 +29,8 @@
29#include "b43.h" 29#include "b43.h"
30#include "phy_n.h" 30#include "phy_n.h"
31#include "tables_nphy.h" 31#include "tables_nphy.h"
32#include "radio_2055.h"
33#include "radio_2056.h"
32#include "main.h" 34#include "main.h"
33 35
34struct nphy_txgains { 36struct nphy_txgains {
@@ -73,21 +75,12 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
73 u16 value, u8 core, bool off); 75 u16 value, u8 core, bool off);
74static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, 76static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
75 u16 value, u8 core); 77 u16 value, u8 core);
76static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel);
77 78
78static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec) 79static inline bool b43_channel_type_is_40mhz(
80 enum nl80211_channel_type channel_type)
79{ 81{
80 return !chanspec->channel && !chanspec->sideband && 82 return (channel_type == NL80211_CHAN_HT40MINUS ||
81 !chanspec->b_width && !chanspec->b_freq; 83 channel_type == NL80211_CHAN_HT40PLUS);
82}
83
84static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
85 struct b43_chanspec *chanspec2)
86{
87 return (chanspec1->channel == chanspec2->channel &&
88 chanspec1->sideband == chanspec2->sideband &&
89 chanspec1->b_width == chanspec2->b_width &&
90 chanspec1->b_freq == chanspec2->b_freq);
91} 84}
92 85
93void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 86void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
@@ -223,7 +216,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
223 if (i) 216 if (i)
224 b43err(dev->wl, "radio post init timeout\n"); 217 b43err(dev->wl, "radio post init timeout\n");
225 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 218 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
226 nphy_channel_switch(dev, dev->phy.channel); 219 b43_switch_channel(dev, dev->phy.channel);
227 b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9); 220 b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9);
228 b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9); 221 b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9);
229 b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83); 222 b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
@@ -782,7 +775,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
782{ 775{
783 struct b43_phy_n *nphy = dev->phy.n; 776 struct b43_phy_n *nphy = dev->phy.n;
784 777
785 u8 channel = nphy->radio_chanspec.channel; 778 u8 channel = dev->phy.channel;
786 int tone[2] = { 57, 58 }; 779 int tone[2] = { 57, 58 };
787 u32 noise[2] = { 0x3FF, 0x3FF }; 780 u32 noise[2] = { 0x3FF, 0x3FF };
788 781
@@ -856,9 +849,9 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
856 gain[0] = 6; 849 gain[0] = 6;
857 gain[1] = 6; 850 gain[1] = 6;
858 } else { 851 } else {
859 tmp = 40370 - 315 * nphy->radio_chanspec.channel; 852 tmp = 40370 - 315 * dev->phy.channel;
860 gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1)); 853 gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
861 tmp = 23242 - 224 * nphy->radio_chanspec.channel; 854 tmp = 23242 - 224 * dev->phy.channel;
862 gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1)); 855 gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
863 } 856 }
864 } else { 857 } else {
@@ -2084,12 +2077,12 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
2084 u16 *rssical_phy_regs = NULL; 2077 u16 *rssical_phy_regs = NULL;
2085 2078
2086 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2079 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2087 if (b43_empty_chanspec(&nphy->rssical_chanspec_2G)) 2080 if (!nphy->rssical_chanspec_2G.center_freq)
2088 return; 2081 return;
2089 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; 2082 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
2090 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; 2083 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
2091 } else { 2084 } else {
2092 if (b43_empty_chanspec(&nphy->rssical_chanspec_5G)) 2085 if (!nphy->rssical_chanspec_5G.center_freq)
2093 return; 2086 return;
2094 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; 2087 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
2095 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; 2088 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
@@ -2545,8 +2538,9 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
2545 txcal_radio_regs[2] = b43_radio_read(dev, 0x8D); 2538 txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
2546 txcal_radio_regs[3] = b43_radio_read(dev, 0xBC); 2539 txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
2547 } 2540 }
2548 *iqcal_chanspec = nphy->radio_chanspec; 2541 iqcal_chanspec->center_freq = dev->phy.channel_freq;
2549 b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table); 2542 iqcal_chanspec->channel_type = dev->phy.channel_type;
2543 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 8, table);
2550 2544
2551 if (nphy->hang_avoid) 2545 if (nphy->hang_avoid)
2552 b43_nphy_stay_in_carrier_search(dev, 0); 2546 b43_nphy_stay_in_carrier_search(dev, 0);
@@ -2566,12 +2560,12 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
2566 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; 2560 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2567 2561
2568 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2562 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2569 if (b43_empty_chanspec(&nphy->iqcal_chanspec_2G)) 2563 if (!nphy->iqcal_chanspec_2G.center_freq)
2570 return; 2564 return;
2571 table = nphy->cal_cache.txcal_coeffs_2G; 2565 table = nphy->cal_cache.txcal_coeffs_2G;
2572 loft = &nphy->cal_cache.txcal_coeffs_2G[5]; 2566 loft = &nphy->cal_cache.txcal_coeffs_2G[5];
2573 } else { 2567 } else {
2574 if (b43_empty_chanspec(&nphy->iqcal_chanspec_5G)) 2568 if (!nphy->iqcal_chanspec_5G.center_freq)
2575 return; 2569 return;
2576 table = nphy->cal_cache.txcal_coeffs_5G; 2570 table = nphy->cal_cache.txcal_coeffs_5G;
2577 loft = &nphy->cal_cache.txcal_coeffs_5G[5]; 2571 loft = &nphy->cal_cache.txcal_coeffs_5G[5];
@@ -2816,7 +2810,10 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
2816 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length, 2810 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
2817 nphy->txiqlocal_bestc); 2811 nphy->txiqlocal_bestc);
2818 nphy->txiqlocal_coeffsvalid = true; 2812 nphy->txiqlocal_coeffsvalid = true;
2819 nphy->txiqlocal_chanspec = nphy->radio_chanspec; 2813 nphy->txiqlocal_chanspec.center_freq =
2814 dev->phy.channel_freq;
2815 nphy->txiqlocal_chanspec.channel_type =
2816 dev->phy.channel_type;
2820 } else { 2817 } else {
2821 length = 11; 2818 length = 11;
2822 if (dev->phy.rev < 3) 2819 if (dev->phy.rev < 3)
@@ -2852,7 +2849,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
2852 bool equal = true; 2849 bool equal = true;
2853 2850
2854 if (!nphy->txiqlocal_coeffsvalid || 2851 if (!nphy->txiqlocal_coeffsvalid ||
2855 b43_eq_chanspecs(&nphy->txiqlocal_chanspec, &nphy->radio_chanspec)) 2852 nphy->txiqlocal_chanspec.center_freq != dev->phy.channel_freq ||
2853 nphy->txiqlocal_chanspec.channel_type != dev->phy.channel_type)
2856 return; 2854 return;
2857 2855
2858 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); 2856 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -3258,11 +3256,9 @@ int b43_phy_initn(struct b43_wldev *dev)
3258 do_rssi_cal = false; 3256 do_rssi_cal = false;
3259 if (phy->rev >= 3) { 3257 if (phy->rev >= 3) {
3260 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3258 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3261 do_rssi_cal = 3259 do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
3262 b43_empty_chanspec(&nphy->rssical_chanspec_2G);
3263 else 3260 else
3264 do_rssi_cal = 3261 do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
3265 b43_empty_chanspec(&nphy->rssical_chanspec_5G);
3266 3262
3267 if (do_rssi_cal) 3263 if (do_rssi_cal)
3268 b43_nphy_rssi_cal(dev); 3264 b43_nphy_rssi_cal(dev);
@@ -3274,9 +3270,9 @@ int b43_phy_initn(struct b43_wldev *dev)
3274 3270
3275 if (!((nphy->measure_hold & 0x6) != 0)) { 3271 if (!((nphy->measure_hold & 0x6) != 0)) {
3276 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3272 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3277 do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_2G); 3273 do_cal = !nphy->iqcal_chanspec_2G.center_freq;
3278 else 3274 else
3279 do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_5G); 3275 do_cal = !nphy->iqcal_chanspec_5G.center_freq;
3280 3276
3281 if (nphy->mute) 3277 if (nphy->mute)
3282 do_cal = false; 3278 do_cal = false;
@@ -3324,24 +3320,25 @@ int b43_phy_initn(struct b43_wldev *dev)
3324} 3320}
3325 3321
3326/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ 3322/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
3327static void b43_nphy_chanspec_setup(struct b43_wldev *dev, 3323static void b43_nphy_channel_setup(struct b43_wldev *dev,
3328 const struct b43_phy_n_sfo_cfg *e, 3324 const struct b43_phy_n_sfo_cfg *e,
3329 struct b43_chanspec chanspec) 3325 struct ieee80211_channel *new_channel)
3330{ 3326{
3331 struct b43_phy *phy = &dev->phy; 3327 struct b43_phy *phy = &dev->phy;
3332 struct b43_phy_n *nphy = dev->phy.n; 3328 struct b43_phy_n *nphy = dev->phy.n;
3333 3329
3334 u16 tmp; 3330 u16 old_band_5ghz;
3335 u32 tmp32; 3331 u32 tmp32;
3336 3332
3337 tmp = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ; 3333 old_band_5ghz =
3338 if (chanspec.b_freq == 1 && tmp == 0) { 3334 b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
3335 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
3339 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 3336 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3340 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 3337 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
3341 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000); 3338 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
3342 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32); 3339 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
3343 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ); 3340 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
3344 } else if (chanspec.b_freq == 1) { 3341 } else if (new_channel->band == IEEE80211_BAND_2GHZ && old_band_5ghz) {
3345 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ); 3342 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
3346 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR); 3343 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3347 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4); 3344 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
@@ -3351,19 +3348,12 @@ static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
3351 3348
3352 b43_chantab_phy_upload(dev, e); 3349 b43_chantab_phy_upload(dev, e);
3353 3350
3354 tmp = chanspec.channel; 3351 if (new_channel->hw_value == 14) {
3355 if (chanspec.b_freq == 1)
3356 tmp |= 0x0100;
3357 if (chanspec.b_width == 3)
3358 tmp |= 0x0200;
3359 b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp);
3360
3361 if (nphy->radio_chanspec.channel == 14) {
3362 b43_nphy_classifier(dev, 2, 0); 3352 b43_nphy_classifier(dev, 2, 0);
3363 b43_phy_set(dev, B43_PHY_B_TEST, 0x0800); 3353 b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
3364 } else { 3354 } else {
3365 b43_nphy_classifier(dev, 2, 2); 3355 b43_nphy_classifier(dev, 2, 2);
3366 if (chanspec.b_freq == 2) 3356 if (new_channel->band == IEEE80211_BAND_2GHZ)
3367 b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840); 3357 b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
3368 } 3358 }
3369 3359
@@ -3386,70 +3376,62 @@ static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
3386} 3376}
3387 3377
3388/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */ 3378/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
3389static int b43_nphy_set_chanspec(struct b43_wldev *dev, 3379static int b43_nphy_set_channel(struct b43_wldev *dev,
3390 struct b43_chanspec chanspec) 3380 struct ieee80211_channel *channel,
3381 enum nl80211_channel_type channel_type)
3391{ 3382{
3383 struct b43_phy *phy = &dev->phy;
3392 struct b43_phy_n *nphy = dev->phy.n; 3384 struct b43_phy_n *nphy = dev->phy.n;
3393 3385
3394 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2; 3386 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
3395 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3; 3387 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
3396 3388
3397 u8 tmp; 3389 u8 tmp;
3398 u8 channel = chanspec.channel;
3399 3390
3400 if (dev->phy.rev >= 3) { 3391 if (dev->phy.rev >= 3) {
3401 /* TODO */ 3392 tabent_r3 = b43_nphy_get_chantabent_rev3(dev,
3393 channel->center_freq);
3402 tabent_r3 = NULL; 3394 tabent_r3 = NULL;
3403 if (!tabent_r3) 3395 if (!tabent_r3)
3404 return -ESRCH; 3396 return -ESRCH;
3405 } else { 3397 } else {
3406 tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel); 3398 tabent_r2 = b43_nphy_get_chantabent_rev2(dev,
3399 channel->hw_value);
3407 if (!tabent_r2) 3400 if (!tabent_r2)
3408 return -ESRCH; 3401 return -ESRCH;
3409 } 3402 }
3410 3403
3411 nphy->radio_chanspec = chanspec; 3404 /* Channel is set later in common code, but we need to set it on our
3405 own to let this function's subcalls work properly. */
3406 phy->channel = channel->hw_value;
3407 phy->channel_freq = channel->center_freq;
3412 3408
3413 if (chanspec.b_width != nphy->b_width) 3409 if (b43_channel_type_is_40mhz(phy->channel_type) !=
3414 ; /* TODO: BMAC BW Set (chanspec.b_width) */ 3410 b43_channel_type_is_40mhz(channel_type))
3411 ; /* TODO: BMAC BW Set (channel_type) */
3415 3412
3416 /* TODO: use defines */ 3413 if (channel_type == NL80211_CHAN_HT40PLUS)
3417 if (chanspec.b_width == 3) { 3414 b43_phy_set(dev, B43_NPHY_RXCTL,
3418 if (chanspec.sideband == 2) 3415 B43_NPHY_RXCTL_BSELU20);
3419 b43_phy_set(dev, B43_NPHY_RXCTL, 3416 else if (channel_type == NL80211_CHAN_HT40MINUS)
3420 B43_NPHY_RXCTL_BSELU20); 3417 b43_phy_mask(dev, B43_NPHY_RXCTL,
3421 else 3418 ~B43_NPHY_RXCTL_BSELU20);
3422 b43_phy_mask(dev, B43_NPHY_RXCTL,
3423 ~B43_NPHY_RXCTL_BSELU20);
3424 }
3425 3419
3426 if (dev->phy.rev >= 3) { 3420 if (dev->phy.rev >= 3) {
3427 tmp = (chanspec.b_freq == 1) ? 4 : 0; 3421 tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
3428 b43_radio_maskset(dev, 0x08, 0xFFFB, tmp); 3422 b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
3429 /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */ 3423 /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
3430 b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec); 3424 b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
3431 } else { 3425 } else {
3432 tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050; 3426 tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
3433 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp); 3427 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
3434 b43_radio_2055_setup(dev, tabent_r2); 3428 b43_radio_2055_setup(dev, tabent_r2);
3435 b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec); 3429 b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel);
3436 } 3430 }
3437 3431
3438 return 0; 3432 return 0;
3439} 3433}
3440 3434
3441/* Tune the hardware to a new channel */
3442static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
3443{
3444 struct b43_phy_n *nphy = dev->phy.n;
3445
3446 struct b43_chanspec chanspec;
3447 chanspec = nphy->radio_chanspec;
3448 chanspec.channel = channel;
3449
3450 return b43_nphy_set_chanspec(dev, chanspec);
3451}
3452
3453static int b43_nphy_op_allocate(struct b43_wldev *dev) 3435static int b43_nphy_op_allocate(struct b43_wldev *dev)
3454{ 3436{
3455 struct b43_phy_n *nphy; 3437 struct b43_phy_n *nphy;
@@ -3570,7 +3552,7 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
3570 } else { 3552 } else {
3571 if (dev->phy.rev >= 3) { 3553 if (dev->phy.rev >= 3) {
3572 b43_radio_init2056(dev); 3554 b43_radio_init2056(dev);
3573 b43_nphy_set_chanspec(dev, nphy->radio_chanspec); 3555 b43_switch_channel(dev, dev->phy.channel);
3574 } else { 3556 } else {
3575 b43_radio_init2055(dev); 3557 b43_radio_init2055(dev);
3576 } 3558 }
@@ -3586,6 +3568,9 @@ static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
3586static int b43_nphy_op_switch_channel(struct b43_wldev *dev, 3568static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
3587 unsigned int new_channel) 3569 unsigned int new_channel)
3588{ 3570{
3571 struct ieee80211_channel *channel = dev->wl->hw->conf.channel;
3572 enum nl80211_channel_type channel_type = dev->wl->hw->conf.channel_type;
3573
3589 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 3574 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
3590 if ((new_channel < 1) || (new_channel > 14)) 3575 if ((new_channel < 1) || (new_channel > 14))
3591 return -EINVAL; 3576 return -EINVAL;
@@ -3594,7 +3579,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
3594 return -EINVAL; 3579 return -EINVAL;
3595 } 3580 }
3596 3581
3597 return nphy_channel_switch(dev, new_channel); 3582 return b43_nphy_set_channel(dev, channel, channel_type);
3598} 3583}
3599 3584
3600static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev) 3585static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 8b6d570dd0aa..c144e59a708b 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -714,223 +714,11 @@
714#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */ 714#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
715#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A) 715#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
716 716
717
718/* Broadcom 2055 radio registers */
719
720#define B2055_GEN_SPARE 0x00 /* GEN spare */
721#define B2055_SP_PINPD 0x02 /* SP PIN PD */
722#define B2055_C1_SP_RSSI 0x03 /* SP RSSI Core 1 */
723#define B2055_C1_SP_PDMISC 0x04 /* SP PD MISC Core 1 */
724#define B2055_C2_SP_RSSI 0x05 /* SP RSSI Core 2 */
725#define B2055_C2_SP_PDMISC 0x06 /* SP PD MISC Core 2 */
726#define B2055_C1_SP_RXGC1 0x07 /* SP RX GC1 Core 1 */
727#define B2055_C1_SP_RXGC2 0x08 /* SP RX GC2 Core 1 */
728#define B2055_C2_SP_RXGC1 0x09 /* SP RX GC1 Core 2 */
729#define B2055_C2_SP_RXGC2 0x0A /* SP RX GC2 Core 2 */
730#define B2055_C1_SP_LPFBWSEL 0x0B /* SP LPF BW select Core 1 */
731#define B2055_C2_SP_LPFBWSEL 0x0C /* SP LPF BW select Core 2 */
732#define B2055_C1_SP_TXGC1 0x0D /* SP TX GC1 Core 1 */
733#define B2055_C1_SP_TXGC2 0x0E /* SP TX GC2 Core 1 */
734#define B2055_C2_SP_TXGC1 0x0F /* SP TX GC1 Core 2 */
735#define B2055_C2_SP_TXGC2 0x10 /* SP TX GC2 Core 2 */
736#define B2055_MASTER1 0x11 /* Master control 1 */
737#define B2055_MASTER2 0x12 /* Master control 2 */
738#define B2055_PD_LGEN 0x13 /* PD LGEN */
739#define B2055_PD_PLLTS 0x14 /* PD PLL TS */
740#define B2055_C1_PD_LGBUF 0x15 /* PD Core 1 LGBUF */
741#define B2055_C1_PD_TX 0x16 /* PD Core 1 TX */
742#define B2055_C1_PD_RXTX 0x17 /* PD Core 1 RXTX */
743#define B2055_C1_PD_RSSIMISC 0x18 /* PD Core 1 RSSI MISC */
744#define B2055_C2_PD_LGBUF 0x19 /* PD Core 2 LGBUF */
745#define B2055_C2_PD_TX 0x1A /* PD Core 2 TX */
746#define B2055_C2_PD_RXTX 0x1B /* PD Core 2 RXTX */
747#define B2055_C2_PD_RSSIMISC 0x1C /* PD Core 2 RSSI MISC */
748#define B2055_PWRDET_LGEN 0x1D /* PWRDET LGEN */
749#define B2055_C1_PWRDET_LGBUF 0x1E /* PWRDET LGBUF Core 1 */
750#define B2055_C1_PWRDET_RXTX 0x1F /* PWRDET RXTX Core 1 */
751#define B2055_C2_PWRDET_LGBUF 0x20 /* PWRDET LGBUF Core 2 */
752#define B2055_C2_PWRDET_RXTX 0x21 /* PWRDET RXTX Core 2 */
753#define B2055_RRCCAL_CS 0x22 /* RRCCAL Control spare */
754#define B2055_RRCCAL_NOPTSEL 0x23 /* RRCCAL N OPT SEL */
755#define B2055_CAL_MISC 0x24 /* CAL MISC */
756#define B2055_CAL_COUT 0x25 /* CAL Counter out */
757#define B2055_CAL_COUT2 0x26 /* CAL Counter out 2 */
758#define B2055_CAL_CVARCTL 0x27 /* CAL CVAR Control */
759#define B2055_CAL_RVARCTL 0x28 /* CAL RVAR Control */
760#define B2055_CAL_LPOCTL 0x29 /* CAL LPO Control */
761#define B2055_CAL_TS 0x2A /* CAL TS */
762#define B2055_CAL_RCCALRTS 0x2B /* CAL RCCAL READ TS */
763#define B2055_CAL_RCALRTS 0x2C /* CAL RCAL READ TS */
764#define B2055_PADDRV 0x2D /* PAD driver */
765#define B2055_XOCTL1 0x2E /* XO Control 1 */
766#define B2055_XOCTL2 0x2F /* XO Control 2 */
767#define B2055_XOREGUL 0x30 /* XO Regulator */
768#define B2055_XOMISC 0x31 /* XO misc */
769#define B2055_PLL_LFC1 0x32 /* PLL LF C1 */
770#define B2055_PLL_CALVTH 0x33 /* PLL CAL VTH */
771#define B2055_PLL_LFC2 0x34 /* PLL LF C2 */
772#define B2055_PLL_REF 0x35 /* PLL reference */
773#define B2055_PLL_LFR1 0x36 /* PLL LF R1 */
774#define B2055_PLL_PFDCP 0x37 /* PLL PFD CP */
775#define B2055_PLL_IDAC_CPOPAMP 0x38 /* PLL IDAC CPOPAMP */
776#define B2055_PLL_CPREG 0x39 /* PLL CP Regulator */
777#define B2055_PLL_RCAL 0x3A /* PLL RCAL */
778#define B2055_RF_PLLMOD0 0x3B /* RF PLL MOD0 */
779#define B2055_RF_PLLMOD1 0x3C /* RF PLL MOD1 */
780#define B2055_RF_MMDIDAC1 0x3D /* RF MMD IDAC 1 */
781#define B2055_RF_MMDIDAC0 0x3E /* RF MMD IDAC 0 */
782#define B2055_RF_MMDSP 0x3F /* RF MMD spare */
783#define B2055_VCO_CAL1 0x40 /* VCO cal 1 */
784#define B2055_VCO_CAL2 0x41 /* VCO cal 2 */
785#define B2055_VCO_CAL3 0x42 /* VCO cal 3 */
786#define B2055_VCO_CAL4 0x43 /* VCO cal 4 */
787#define B2055_VCO_CAL5 0x44 /* VCO cal 5 */
788#define B2055_VCO_CAL6 0x45 /* VCO cal 6 */
789#define B2055_VCO_CAL7 0x46 /* VCO cal 7 */
790#define B2055_VCO_CAL8 0x47 /* VCO cal 8 */
791#define B2055_VCO_CAL9 0x48 /* VCO cal 9 */
792#define B2055_VCO_CAL10 0x49 /* VCO cal 10 */
793#define B2055_VCO_CAL11 0x4A /* VCO cal 11 */
794#define B2055_VCO_CAL12 0x4B /* VCO cal 12 */
795#define B2055_VCO_CAL13 0x4C /* VCO cal 13 */
796#define B2055_VCO_CAL14 0x4D /* VCO cal 14 */
797#define B2055_VCO_CAL15 0x4E /* VCO cal 15 */
798#define B2055_VCO_CAL16 0x4F /* VCO cal 16 */
799#define B2055_VCO_KVCO 0x50 /* VCO KVCO */
800#define B2055_VCO_CAPTAIL 0x51 /* VCO CAP TAIL */
801#define B2055_VCO_IDACVCO 0x52 /* VCO IDAC VCO */
802#define B2055_VCO_REG 0x53 /* VCO Regulator */
803#define B2055_PLL_RFVTH 0x54 /* PLL RF VTH */
804#define B2055_LGBUF_CENBUF 0x55 /* LGBUF CEN BUF */
805#define B2055_LGEN_TUNE1 0x56 /* LGEN tune 1 */
806#define B2055_LGEN_TUNE2 0x57 /* LGEN tune 2 */
807#define B2055_LGEN_IDAC1 0x58 /* LGEN IDAC 1 */
808#define B2055_LGEN_IDAC2 0x59 /* LGEN IDAC 2 */
809#define B2055_LGEN_BIASC 0x5A /* LGEN BIAS counter */
810#define B2055_LGEN_BIASIDAC 0x5B /* LGEN BIAS IDAC */
811#define B2055_LGEN_RCAL 0x5C /* LGEN RCAL */
812#define B2055_LGEN_DIV 0x5D /* LGEN div */
813#define B2055_LGEN_SPARE2 0x5E /* LGEN spare 2 */
814#define B2055_C1_LGBUF_ATUNE 0x5F /* Core 1 LGBUF A tune */
815#define B2055_C1_LGBUF_GTUNE 0x60 /* Core 1 LGBUF G tune */
816#define B2055_C1_LGBUF_DIV 0x61 /* Core 1 LGBUF div */
817#define B2055_C1_LGBUF_AIDAC 0x62 /* Core 1 LGBUF A IDAC */
818#define B2055_C1_LGBUF_GIDAC 0x63 /* Core 1 LGBUF G IDAC */
819#define B2055_C1_LGBUF_IDACFO 0x64 /* Core 1 LGBUF IDAC filter override */
820#define B2055_C1_LGBUF_SPARE 0x65 /* Core 1 LGBUF spare */
821#define B2055_C1_RX_RFSPC1 0x66 /* Core 1 RX RF SPC1 */
822#define B2055_C1_RX_RFR1 0x67 /* Core 1 RX RF reg 1 */
823#define B2055_C1_RX_RFR2 0x68 /* Core 1 RX RF reg 2 */
824#define B2055_C1_RX_RFRCAL 0x69 /* Core 1 RX RF RCAL */
825#define B2055_C1_RX_BB_BLCMP 0x6A /* Core 1 RX Baseband BUFI LPF CMP */
826#define B2055_C1_RX_BB_LPF 0x6B /* Core 1 RX Baseband LPF */
827#define B2055_C1_RX_BB_MIDACHP 0x6C /* Core 1 RX Baseband MIDAC High-pass */
828#define B2055_C1_RX_BB_VGA1IDAC 0x6D /* Core 1 RX Baseband VGA1 IDAC */
829#define B2055_C1_RX_BB_VGA2IDAC 0x6E /* Core 1 RX Baseband VGA2 IDAC */
830#define B2055_C1_RX_BB_VGA3IDAC 0x6F /* Core 1 RX Baseband VGA3 IDAC */
831#define B2055_C1_RX_BB_BUFOCTL 0x70 /* Core 1 RX Baseband BUFO Control */
832#define B2055_C1_RX_BB_RCCALCTL 0x71 /* Core 1 RX Baseband RCCAL Control */
833#define B2055_C1_RX_BB_RSSICTL1 0x72 /* Core 1 RX Baseband RSSI Control 1 */
834#define B2055_C1_RX_BB_RSSICTL2 0x73 /* Core 1 RX Baseband RSSI Control 2 */
835#define B2055_C1_RX_BB_RSSICTL3 0x74 /* Core 1 RX Baseband RSSI Control 3 */
836#define B2055_C1_RX_BB_RSSICTL4 0x75 /* Core 1 RX Baseband RSSI Control 4 */
837#define B2055_C1_RX_BB_RSSICTL5 0x76 /* Core 1 RX Baseband RSSI Control 5 */
838#define B2055_C1_RX_BB_REG 0x77 /* Core 1 RX Baseband Regulator */
839#define B2055_C1_RX_BB_SPARE1 0x78 /* Core 1 RX Baseband spare 1 */
840#define B2055_C1_RX_TXBBRCAL 0x79 /* Core 1 RX TX BB RCAL */
841#define B2055_C1_TX_RF_SPGA 0x7A /* Core 1 TX RF SGM PGA */
842#define B2055_C1_TX_RF_SPAD 0x7B /* Core 1 TX RF SGM PAD */
843#define B2055_C1_TX_RF_CNTPGA1 0x7C /* Core 1 TX RF counter PGA 1 */
844#define B2055_C1_TX_RF_CNTPAD1 0x7D /* Core 1 TX RF counter PAD 1 */
845#define B2055_C1_TX_RF_PGAIDAC 0x7E /* Core 1 TX RF PGA IDAC */
846#define B2055_C1_TX_PGAPADTN 0x7F /* Core 1 TX PGA PAD TN */
847#define B2055_C1_TX_PADIDAC1 0x80 /* Core 1 TX PAD IDAC 1 */
848#define B2055_C1_TX_PADIDAC2 0x81 /* Core 1 TX PAD IDAC 2 */
849#define B2055_C1_TX_MXBGTRIM 0x82 /* Core 1 TX MX B/G TRIM */
850#define B2055_C1_TX_RF_RCAL 0x83 /* Core 1 TX RF RCAL */
851#define B2055_C1_TX_RF_PADTSSI1 0x84 /* Core 1 TX RF PAD TSSI1 */
852#define B2055_C1_TX_RF_PADTSSI2 0x85 /* Core 1 TX RF PAD TSSI2 */
853#define B2055_C1_TX_RF_SPARE 0x86 /* Core 1 TX RF spare */
854#define B2055_C1_TX_RF_IQCAL1 0x87 /* Core 1 TX RF I/Q CAL 1 */
855#define B2055_C1_TX_RF_IQCAL2 0x88 /* Core 1 TX RF I/Q CAL 2 */
856#define B2055_C1_TXBB_RCCAL 0x89 /* Core 1 TXBB RC CAL Control */
857#define B2055_C1_TXBB_LPF1 0x8A /* Core 1 TXBB LPF 1 */
858#define B2055_C1_TX_VOSCNCL 0x8B /* Core 1 TX VOS CNCL */
859#define B2055_C1_TX_LPF_MXGMIDAC 0x8C /* Core 1 TX LPF MXGM IDAC */
860#define B2055_C1_TX_BB_MXGM 0x8D /* Core 1 TX BB MXGM */
861#define B2055_C2_LGBUF_ATUNE 0x8E /* Core 2 LGBUF A tune */
862#define B2055_C2_LGBUF_GTUNE 0x8F /* Core 2 LGBUF G tune */
863#define B2055_C2_LGBUF_DIV 0x90 /* Core 2 LGBUF div */
864#define B2055_C2_LGBUF_AIDAC 0x91 /* Core 2 LGBUF A IDAC */
865#define B2055_C2_LGBUF_GIDAC 0x92 /* Core 2 LGBUF G IDAC */
866#define B2055_C2_LGBUF_IDACFO 0x93 /* Core 2 LGBUF IDAC filter override */
867#define B2055_C2_LGBUF_SPARE 0x94 /* Core 2 LGBUF spare */
868#define B2055_C2_RX_RFSPC1 0x95 /* Core 2 RX RF SPC1 */
869#define B2055_C2_RX_RFR1 0x96 /* Core 2 RX RF reg 1 */
870#define B2055_C2_RX_RFR2 0x97 /* Core 2 RX RF reg 2 */
871#define B2055_C2_RX_RFRCAL 0x98 /* Core 2 RX RF RCAL */
872#define B2055_C2_RX_BB_BLCMP 0x99 /* Core 2 RX Baseband BUFI LPF CMP */
873#define B2055_C2_RX_BB_LPF 0x9A /* Core 2 RX Baseband LPF */
874#define B2055_C2_RX_BB_MIDACHP 0x9B /* Core 2 RX Baseband MIDAC High-pass */
875#define B2055_C2_RX_BB_VGA1IDAC 0x9C /* Core 2 RX Baseband VGA1 IDAC */
876#define B2055_C2_RX_BB_VGA2IDAC 0x9D /* Core 2 RX Baseband VGA2 IDAC */
877#define B2055_C2_RX_BB_VGA3IDAC 0x9E /* Core 2 RX Baseband VGA3 IDAC */
878#define B2055_C2_RX_BB_BUFOCTL 0x9F /* Core 2 RX Baseband BUFO Control */
879#define B2055_C2_RX_BB_RCCALCTL 0xA0 /* Core 2 RX Baseband RCCAL Control */
880#define B2055_C2_RX_BB_RSSICTL1 0xA1 /* Core 2 RX Baseband RSSI Control 1 */
881#define B2055_C2_RX_BB_RSSICTL2 0xA2 /* Core 2 RX Baseband RSSI Control 2 */
882#define B2055_C2_RX_BB_RSSICTL3 0xA3 /* Core 2 RX Baseband RSSI Control 3 */
883#define B2055_C2_RX_BB_RSSICTL4 0xA4 /* Core 2 RX Baseband RSSI Control 4 */
884#define B2055_C2_RX_BB_RSSICTL5 0xA5 /* Core 2 RX Baseband RSSI Control 5 */
885#define B2055_C2_RX_BB_REG 0xA6 /* Core 2 RX Baseband Regulator */
886#define B2055_C2_RX_BB_SPARE1 0xA7 /* Core 2 RX Baseband spare 1 */
887#define B2055_C2_RX_TXBBRCAL 0xA8 /* Core 2 RX TX BB RCAL */
888#define B2055_C2_TX_RF_SPGA 0xA9 /* Core 2 TX RF SGM PGA */
889#define B2055_C2_TX_RF_SPAD 0xAA /* Core 2 TX RF SGM PAD */
890#define B2055_C2_TX_RF_CNTPGA1 0xAB /* Core 2 TX RF counter PGA 1 */
891#define B2055_C2_TX_RF_CNTPAD1 0xAC /* Core 2 TX RF counter PAD 1 */
892#define B2055_C2_TX_RF_PGAIDAC 0xAD /* Core 2 TX RF PGA IDAC */
893#define B2055_C2_TX_PGAPADTN 0xAE /* Core 2 TX PGA PAD TN */
894#define B2055_C2_TX_PADIDAC1 0xAF /* Core 2 TX PAD IDAC 1 */
895#define B2055_C2_TX_PADIDAC2 0xB0 /* Core 2 TX PAD IDAC 2 */
896#define B2055_C2_TX_MXBGTRIM 0xB1 /* Core 2 TX MX B/G TRIM */
897#define B2055_C2_TX_RF_RCAL 0xB2 /* Core 2 TX RF RCAL */
898#define B2055_C2_TX_RF_PADTSSI1 0xB3 /* Core 2 TX RF PAD TSSI1 */
899#define B2055_C2_TX_RF_PADTSSI2 0xB4 /* Core 2 TX RF PAD TSSI2 */
900#define B2055_C2_TX_RF_SPARE 0xB5 /* Core 2 TX RF spare */
901#define B2055_C2_TX_RF_IQCAL1 0xB6 /* Core 2 TX RF I/Q CAL 1 */
902#define B2055_C2_TX_RF_IQCAL2 0xB7 /* Core 2 TX RF I/Q CAL 2 */
903#define B2055_C2_TXBB_RCCAL 0xB8 /* Core 2 TXBB RC CAL Control */
904#define B2055_C2_TXBB_LPF1 0xB9 /* Core 2 TXBB LPF 1 */
905#define B2055_C2_TX_VOSCNCL 0xBA /* Core 2 TX VOS CNCL */
906#define B2055_C2_TX_LPF_MXGMIDAC 0xBB /* Core 2 TX LPF MXGM IDAC */
907#define B2055_C2_TX_BB_MXGM 0xBC /* Core 2 TX BB MXGM */
908#define B2055_PRG_GCHP21 0xBD /* PRG GC HPVGA23 21 */
909#define B2055_PRG_GCHP22 0xBE /* PRG GC HPVGA23 22 */
910#define B2055_PRG_GCHP23 0xBF /* PRG GC HPVGA23 23 */
911#define B2055_PRG_GCHP24 0xC0 /* PRG GC HPVGA23 24 */
912#define B2055_PRG_GCHP25 0xC1 /* PRG GC HPVGA23 25 */
913#define B2055_PRG_GCHP26 0xC2 /* PRG GC HPVGA23 26 */
914#define B2055_PRG_GCHP27 0xC3 /* PRG GC HPVGA23 27 */
915#define B2055_PRG_GCHP28 0xC4 /* PRG GC HPVGA23 28 */
916#define B2055_PRG_GCHP29 0xC5 /* PRG GC HPVGA23 29 */
917#define B2055_PRG_GCHP30 0xC6 /* PRG GC HPVGA23 30 */
918#define B2055_C1_LNA_GAINBST 0xCD /* Core 1 LNA GAINBST */
919#define B2055_C1_B0NB_RSSIVCM 0xD2 /* Core 1 B0 narrow-band RSSI VCM */
920#define B2055_C1_GENSPARE2 0xD6 /* Core 1 GEN spare 2 */
921#define B2055_C2_LNA_GAINBST 0xD9 /* Core 2 LNA GAINBST */
922#define B2055_C2_B0NB_RSSIVCM 0xDE /* Core 2 B0 narrow-band RSSI VCM */
923#define B2055_C2_GENSPARE2 0xE2 /* Core 2 GEN spare 2 */
924
925
926
927struct b43_wldev; 717struct b43_wldev;
928 718
929struct b43_chanspec { 719struct b43_chanspec {
930 u8 channel; 720 u16 center_freq;
931 u8 sideband; 721 enum nl80211_channel_type channel_type;
932 u8 b_width;
933 u8 b_freq;
934}; 722};
935 723
936struct b43_phy_n_iq_comp { 724struct b43_phy_n_iq_comp {
@@ -984,8 +772,6 @@ struct b43_phy_n {
984 u16 papd_epsilon_offset[2]; 772 u16 papd_epsilon_offset[2];
985 s32 preamble_override; 773 s32 preamble_override;
986 u32 bb_mult_save; 774 u32 bb_mult_save;
987 u8 b_width;
988 struct b43_chanspec radio_chanspec;
989 775
990 bool gain_boost; 776 bool gain_boost;
991 bool elna_gain_config; 777 bool elna_gain_config;
diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/b43/radio_2055.c
new file mode 100644
index 000000000000..1b5316586cbf
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2055.c
@@ -0,0 +1,1332 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11n PHY and radio device data tables
5
6 Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
21 Boston, MA 02110-1301, USA.
22
23*/
24
25#include "b43.h"
26#include "radio_2055.h"
27#include "phy_common.h"
28
29struct b2055_inittab_entry {
30 /* Value to write if we use the 5GHz band. */
31 u16 ghz5;
32 /* Value to write if we use the 2.4GHz band. */
33 u16 ghz2;
34 /* Flags */
35 u8 flags;
36#define B2055_INITTAB_ENTRY_OK 0x01
37#define B2055_INITTAB_UPLOAD 0x02
38};
39#define UPLOAD .flags = B2055_INITTAB_ENTRY_OK | B2055_INITTAB_UPLOAD
40#define NOUPLOAD .flags = B2055_INITTAB_ENTRY_OK
41
42static const struct b2055_inittab_entry b2055_inittab [] = {
43 [B2055_SP_PINPD] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
44 [B2055_C1_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
45 [B2055_C1_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, },
46 [B2055_C2_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
47 [B2055_C2_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, },
48 [B2055_C1_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, },
49 [B2055_C1_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
50 [B2055_C2_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, },
51 [B2055_C2_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
52 [B2055_C1_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
53 [B2055_C2_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
54 [B2055_C1_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, },
55 [B2055_C1_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
56 [B2055_C2_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, },
57 [B2055_C2_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
58 [B2055_MASTER1] = { .ghz5 = 0x00D0, .ghz2 = 0x00D0, NOUPLOAD, },
59 [B2055_MASTER2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
60 [B2055_PD_LGEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
61 [B2055_PD_PLLTS] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
62 [B2055_C1_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
63 [B2055_C1_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
64 [B2055_C1_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
65 [B2055_C1_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
66 [B2055_C2_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
67 [B2055_C2_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
68 [B2055_C2_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
69 [B2055_C2_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
70 [B2055_PWRDET_LGEN] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
71 [B2055_C1_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
72 [B2055_C1_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
73 [B2055_C2_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
74 [B2055_C2_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
75 [B2055_RRCCAL_CS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
76 [B2055_RRCCAL_NOPTSEL] = { .ghz5 = 0x002C, .ghz2 = 0x002C, NOUPLOAD, },
77 [B2055_CAL_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
78 [B2055_CAL_COUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
79 [B2055_CAL_COUT2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
80 [B2055_CAL_CVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
81 [B2055_CAL_RVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
82 [B2055_CAL_LPOCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
83 [B2055_CAL_TS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
84 [B2055_CAL_RCCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
85 [B2055_CAL_RCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
86 [B2055_PADDRV] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
87 [B2055_XOCTL1] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
88 [B2055_XOCTL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
89 [B2055_XOREGUL] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
90 [B2055_XOMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
91 [B2055_PLL_LFC1] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
92 [B2055_PLL_CALVTH] = { .ghz5 = 0x0087, .ghz2 = 0x0087, NOUPLOAD, },
93 [B2055_PLL_LFC2] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, },
94 [B2055_PLL_REF] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
95 [B2055_PLL_LFR1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
96 [B2055_PLL_PFDCP] = { .ghz5 = 0x0018, .ghz2 = 0x0018, UPLOAD, },
97 [B2055_PLL_IDAC_CPOPAMP] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
98 [B2055_PLL_CPREG] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
99 [B2055_PLL_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
100 [B2055_RF_PLLMOD0] = { .ghz5 = 0x009E, .ghz2 = 0x009E, NOUPLOAD, },
101 [B2055_RF_PLLMOD1] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, },
102 [B2055_RF_MMDIDAC1] = { .ghz5 = 0x00C8, .ghz2 = 0x00C8, UPLOAD, },
103 [B2055_RF_MMDIDAC0] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
104 [B2055_RF_MMDSP] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
105 [B2055_VCO_CAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
106 [B2055_VCO_CAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
107 [B2055_VCO_CAL3] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
108 [B2055_VCO_CAL4] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
109 [B2055_VCO_CAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
110 [B2055_VCO_CAL6] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, },
111 [B2055_VCO_CAL7] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, },
112 [B2055_VCO_CAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
113 [B2055_VCO_CAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
114 [B2055_VCO_CAL10] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
115 [B2055_VCO_CAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
116 [B2055_VCO_CAL12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
117 [B2055_VCO_CAL13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
118 [B2055_VCO_CAL14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
119 [B2055_VCO_CAL15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
120 [B2055_VCO_CAL16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
121 [B2055_VCO_KVCO] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
122 [B2055_VCO_CAPTAIL] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
123 [B2055_VCO_IDACVCO] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
124 [B2055_VCO_REG] = { .ghz5 = 0x0084, .ghz2 = 0x0084, UPLOAD, },
125 [B2055_PLL_RFVTH] = { .ghz5 = 0x00C3, .ghz2 = 0x00C3, NOUPLOAD, },
126 [B2055_LGBUF_CENBUF] = { .ghz5 = 0x008F, .ghz2 = 0x008F, NOUPLOAD, },
127 [B2055_LGEN_TUNE1] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
128 [B2055_LGEN_TUNE2] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
129 [B2055_LGEN_IDAC1] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
130 [B2055_LGEN_IDAC2] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
131 [B2055_LGEN_BIASC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
132 [B2055_LGEN_BIASIDAC] = { .ghz5 = 0x00CC, .ghz2 = 0x00CC, NOUPLOAD, },
133 [B2055_LGEN_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
134 [B2055_LGEN_DIV] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
135 [B2055_LGEN_SPARE2] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
136 [B2055_C1_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, },
137 [B2055_C1_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
138 [B2055_C1_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
139 [B2055_C1_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, },
140 [B2055_C1_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
141 [B2055_C1_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
142 [B2055_C1_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, },
143 [B2055_C1_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, },
144 [B2055_C1_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
145 [B2055_C1_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
146 [B2055_C1_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
147 [B2055_C1_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, },
148 [B2055_C1_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
149 [B2055_C1_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
150 [B2055_C1_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
151 [B2055_C1_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
152 [B2055_C1_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
153 [B2055_C1_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
154 [B2055_C1_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
155 [B2055_C1_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, },
156 [B2055_C1_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, },
157 [B2055_C1_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, },
158 [B2055_C1_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, },
159 [B2055_C1_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, },
160 [B2055_C1_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
161 [B2055_C1_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
162 [B2055_C1_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
163 [B2055_C1_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
164 [B2055_C1_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
165 [B2055_C1_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
166 [B2055_C1_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
167 [B2055_C1_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, },
168 [B2055_C1_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
169 [B2055_C1_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, },
170 [B2055_C1_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, },
171 [B2055_C1_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
172 [B2055_C1_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
173 [B2055_C1_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
174 [B2055_C1_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
175 [B2055_C1_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
176 [B2055_C1_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
177 [B2055_C1_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
178 [B2055_C1_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
179 [B2055_C1_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, },
180 [B2055_C1_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
181 [B2055_C1_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, },
182 [B2055_C1_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
183 [B2055_C2_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, },
184 [B2055_C2_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
185 [B2055_C2_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
186 [B2055_C2_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, },
187 [B2055_C2_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
188 [B2055_C2_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
189 [B2055_C2_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, },
190 [B2055_C2_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, },
191 [B2055_C2_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
192 [B2055_C2_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
193 [B2055_C2_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
194 [B2055_C2_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, },
195 [B2055_C2_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
196 [B2055_C2_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
197 [B2055_C2_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
198 [B2055_C2_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
199 [B2055_C2_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
200 [B2055_C2_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
201 [B2055_C2_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
202 [B2055_C2_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, },
203 [B2055_C2_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, },
204 [B2055_C2_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, },
205 [B2055_C2_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, },
206 [B2055_C2_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, },
207 [B2055_C2_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
208 [B2055_C2_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
209 [B2055_C2_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
210 [B2055_C2_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
211 [B2055_C2_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
212 [B2055_C2_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
213 [B2055_C2_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
214 [B2055_C2_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, },
215 [B2055_C2_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
216 [B2055_C2_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, },
217 [B2055_C2_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, },
218 [B2055_C2_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
219 [B2055_C2_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
220 [B2055_C2_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
221 [B2055_C2_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
222 [B2055_C2_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
223 [B2055_C2_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
224 [B2055_C2_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
225 [B2055_C2_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
226 [B2055_C2_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, },
227 [B2055_C2_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
228 [B2055_C2_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, },
229 [B2055_C2_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
230 [B2055_PRG_GCHP21] = { .ghz5 = 0x0071, .ghz2 = 0x0071, NOUPLOAD, },
231 [B2055_PRG_GCHP22] = { .ghz5 = 0x0072, .ghz2 = 0x0072, NOUPLOAD, },
232 [B2055_PRG_GCHP23] = { .ghz5 = 0x0073, .ghz2 = 0x0073, NOUPLOAD, },
233 [B2055_PRG_GCHP24] = { .ghz5 = 0x0074, .ghz2 = 0x0074, NOUPLOAD, },
234 [B2055_PRG_GCHP25] = { .ghz5 = 0x0075, .ghz2 = 0x0075, NOUPLOAD, },
235 [B2055_PRG_GCHP26] = { .ghz5 = 0x0076, .ghz2 = 0x0076, NOUPLOAD, },
236 [B2055_PRG_GCHP27] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
237 [B2055_PRG_GCHP28] = { .ghz5 = 0x0078, .ghz2 = 0x0078, NOUPLOAD, },
238 [B2055_PRG_GCHP29] = { .ghz5 = 0x0079, .ghz2 = 0x0079, NOUPLOAD, },
239 [B2055_PRG_GCHP30] = { .ghz5 = 0x007A, .ghz2 = 0x007A, NOUPLOAD, },
240 [0xC7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
241 [0xC8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
242 [0xC9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
243 [0xCA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
244 [0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
245 [0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
246 [B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
247 [0xCE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
248 [0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
249 [0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
250 [0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
251 [B2055_C1_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
252 [0xD3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
253 [0xD4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
254 [0xD5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
255 [B2055_C1_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
256 [0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
257 [0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
258 [B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
259 [0xDA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
260 [0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
261 [0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
262 [0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
263 [B2055_C2_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
264 [0xDF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
265 [0xE0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
266 [0xE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
267 [B2055_C2_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
268};
269
270#define RADIOREGS(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \
271 r12, r13, r14, r15, r16, r17, r18, r19, r20, r21) \
272 .radio_pll_ref = r0, \
273 .radio_rf_pllmod0 = r1, \
274 .radio_rf_pllmod1 = r2, \
275 .radio_vco_captail = r3, \
276 .radio_vco_cal1 = r4, \
277 .radio_vco_cal2 = r5, \
278 .radio_pll_lfc1 = r6, \
279 .radio_pll_lfr1 = r7, \
280 .radio_pll_lfc2 = r8, \
281 .radio_lgbuf_cenbuf = r9, \
282 .radio_lgen_tune1 = r10, \
283 .radio_lgen_tune2 = r11, \
284 .radio_c1_lgbuf_atune = r12, \
285 .radio_c1_lgbuf_gtune = r13, \
286 .radio_c1_rx_rfr1 = r14, \
287 .radio_c1_tx_pgapadtn = r15, \
288 .radio_c1_tx_mxbgtrim = r16, \
289 .radio_c2_lgbuf_atune = r17, \
290 .radio_c2_lgbuf_gtune = r18, \
291 .radio_c2_rx_rfr1 = r19, \
292 .radio_c2_tx_pgapadtn = r20, \
293 .radio_c2_tx_mxbgtrim = r21
294
295#define PHYREGS(r0, r1, r2, r3, r4, r5) \
296 .phy_regs.phy_bw1a = r0, \
297 .phy_regs.phy_bw2 = r1, \
298 .phy_regs.phy_bw3 = r2, \
299 .phy_regs.phy_bw4 = r3, \
300 .phy_regs.phy_bw5 = r4, \
301 .phy_regs.phy_bw6 = r5
302
303static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] = {
304 { .channel = 184,
305 .freq = 4920, /* MHz */
306 .unk2 = 3280,
307 RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
308 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
309 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
310 PHYREGS(0xB407, 0xB007, 0xAC07, 0x1402, 0x1502, 0x1602),
311 },
312 { .channel = 186,
313 .freq = 4930, /* MHz */
314 .unk2 = 3287,
315 RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
316 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
317 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
318 PHYREGS(0xB807, 0xB407, 0xB007, 0x1302, 0x1402, 0x1502),
319 },
320 { .channel = 188,
321 .freq = 4940, /* MHz */
322 .unk2 = 3293,
323 RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
324 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
325 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
326 PHYREGS(0xBC07, 0xB807, 0xB407, 0x1202, 0x1302, 0x1402),
327 },
328 { .channel = 190,
329 .freq = 4950, /* MHz */
330 .unk2 = 3300,
331 RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
332 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
333 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
334 PHYREGS(0xC007, 0xBC07, 0xB807, 0x1102, 0x1202, 0x1302),
335 },
336 { .channel = 192,
337 .freq = 4960, /* MHz */
338 .unk2 = 3307,
339 RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
340 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
341 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
342 PHYREGS(0xC407, 0xC007, 0xBC07, 0x0F02, 0x1102, 0x1202),
343 },
344 { .channel = 194,
345 .freq = 4970, /* MHz */
346 .unk2 = 3313,
347 RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
348 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
349 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
350 PHYREGS(0xC807, 0xC407, 0xC007, 0x0E02, 0x0F02, 0x1102),
351 },
352 { .channel = 196,
353 .freq = 4980, /* MHz */
354 .unk2 = 3320,
355 RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
356 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
357 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
358 PHYREGS(0xCC07, 0xC807, 0xC407, 0x0D02, 0x0E02, 0x0F02),
359 },
360 { .channel = 198,
361 .freq = 4990, /* MHz */
362 .unk2 = 3327,
363 RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
364 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
365 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
366 PHYREGS(0xD007, 0xCC07, 0xC807, 0x0C02, 0x0D02, 0x0E02),
367 },
368 { .channel = 200,
369 .freq = 5000, /* MHz */
370 .unk2 = 3333,
371 RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
372 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
373 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
374 PHYREGS(0xD407, 0xD007, 0xCC07, 0x0B02, 0x0C02, 0x0D02),
375 },
376 { .channel = 202,
377 .freq = 5010, /* MHz */
378 .unk2 = 3340,
379 RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
380 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
381 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
382 PHYREGS(0xD807, 0xD407, 0xD007, 0x0A02, 0x0B02, 0x0C02),
383 },
384 { .channel = 204,
385 .freq = 5020, /* MHz */
386 .unk2 = 3347,
387 RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
388 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
389 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
390 PHYREGS(0xDC07, 0xD807, 0xD407, 0x0902, 0x0A02, 0x0B02),
391 },
392 { .channel = 206,
393 .freq = 5030, /* MHz */
394 .unk2 = 3353,
395 RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
396 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
397 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
398 PHYREGS(0xE007, 0xDC07, 0xD807, 0x0802, 0x0902, 0x0A02),
399 },
400 { .channel = 208,
401 .freq = 5040, /* MHz */
402 .unk2 = 3360,
403 RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
404 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
405 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
406 PHYREGS(0xE407, 0xE007, 0xDC07, 0x0702, 0x0802, 0x0902),
407 },
408 { .channel = 210,
409 .freq = 5050, /* MHz */
410 .unk2 = 3367,
411 RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
412 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
413 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
414 PHYREGS(0xE807, 0xE407, 0xE007, 0x0602, 0x0702, 0x0802),
415 },
416 { .channel = 212,
417 .freq = 5060, /* MHz */
418 .unk2 = 3373,
419 RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
420 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
421 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
422 PHYREGS(0xEC07, 0xE807, 0xE407, 0x0502, 0x0602, 0x0702),
423 },
424 { .channel = 214,
425 .freq = 5070, /* MHz */
426 .unk2 = 3380,
427 RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
428 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
429 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
430 PHYREGS(0xF007, 0xEC07, 0xE807, 0x0402, 0x0502, 0x0602),
431 },
432 { .channel = 216,
433 .freq = 5080, /* MHz */
434 .unk2 = 3387,
435 RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
436 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
437 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
438 PHYREGS(0xF407, 0xF007, 0xEC07, 0x0302, 0x0402, 0x0502),
439 },
440 { .channel = 218,
441 .freq = 5090, /* MHz */
442 .unk2 = 3393,
443 RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
444 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
445 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
446 PHYREGS(0xF807, 0xF407, 0xF007, 0x0202, 0x0302, 0x0402),
447 },
448 { .channel = 220,
449 .freq = 5100, /* MHz */
450 .unk2 = 3400,
451 RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
452 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
453 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
454 PHYREGS(0xFC07, 0xF807, 0xF407, 0x0102, 0x0202, 0x0302),
455 },
456 { .channel = 222,
457 .freq = 5110, /* MHz */
458 .unk2 = 3407,
459 RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
460 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
461 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
462 PHYREGS(0x0008, 0xFC07, 0xF807, 0x0002, 0x0102, 0x0202),
463 },
464 { .channel = 224,
465 .freq = 5120, /* MHz */
466 .unk2 = 3413,
467 RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
468 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
469 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
470 PHYREGS(0x0408, 0x0008, 0xFC07, 0xFF01, 0x0002, 0x0102),
471 },
472 { .channel = 226,
473 .freq = 5130, /* MHz */
474 .unk2 = 3420,
475 RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
476 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
477 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
478 PHYREGS(0x0808, 0x0408, 0x0008, 0xFE01, 0xFF01, 0x0002),
479 },
480 { .channel = 228,
481 .freq = 5140, /* MHz */
482 .unk2 = 3427,
483 RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A,
484 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E,
485 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B),
486 PHYREGS(0x0C08, 0x0808, 0x0408, 0xFD01, 0xFE01, 0xFF01),
487 },
488 { .channel = 32,
489 .freq = 5160, /* MHz */
490 .unk2 = 3440,
491 RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
492 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
493 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
494 PHYREGS(0x1408, 0x1008, 0x0C08, 0xFB01, 0xFC01, 0xFD01),
495 },
496 { .channel = 34,
497 .freq = 5170, /* MHz */
498 .unk2 = 3447,
499 RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
500 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
501 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
502 PHYREGS(0x1808, 0x1408, 0x1008, 0xFA01, 0xFB01, 0xFC01),
503 },
504 { .channel = 36,
505 .freq = 5180, /* MHz */
506 .unk2 = 3453,
507 RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
508 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
509 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
510 PHYREGS(0x1C08, 0x1808, 0x1408, 0xF901, 0xFA01, 0xFB01),
511 },
512 { .channel = 38,
513 .freq = 5190, /* MHz */
514 .unk2 = 3460,
515 RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
516 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
517 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
518 PHYREGS(0x2008, 0x1C08, 0x1808, 0xF801, 0xF901, 0xFA01),
519 },
520 { .channel = 40,
521 .freq = 5200, /* MHz */
522 .unk2 = 3467,
523 RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
524 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
525 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
526 PHYREGS(0x2408, 0x2008, 0x1C08, 0xF701, 0xF801, 0xF901),
527 },
528 { .channel = 42,
529 .freq = 5210, /* MHz */
530 .unk2 = 3473,
531 RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
532 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
533 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
534 PHYREGS(0x2808, 0x2408, 0x2008, 0xF601, 0xF701, 0xF801),
535 },
536 { .channel = 44,
537 .freq = 5220, /* MHz */
538 .unk2 = 3480,
539 RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
540 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
541 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
542 PHYREGS(0x2C08, 0x2808, 0x2408, 0xF501, 0xF601, 0xF701),
543 },
544 { .channel = 46,
545 .freq = 5230, /* MHz */
546 .unk2 = 3487,
547 RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
548 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
549 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
550 PHYREGS(0x3008, 0x2C08, 0x2808, 0xF401, 0xF501, 0xF601),
551 },
552 { .channel = 48,
553 .freq = 5240, /* MHz */
554 .unk2 = 3493,
555 RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
556 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
557 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
558 PHYREGS(0x3408, 0x3008, 0x2C08, 0xF301, 0xF401, 0xF501),
559 },
560 { .channel = 50,
561 .freq = 5250, /* MHz */
562 .unk2 = 3500,
563 RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
564 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
565 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
566 PHYREGS(0x3808, 0x3408, 0x3008, 0xF201, 0xF301, 0xF401),
567 },
568 { .channel = 52,
569 .freq = 5260, /* MHz */
570 .unk2 = 3507,
571 RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A,
572 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
573 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
574 PHYREGS(0x3C08, 0x3808, 0x3408, 0xF101, 0xF201, 0xF301),
575 },
576 { .channel = 54,
577 .freq = 5270, /* MHz */
578 .unk2 = 3513,
579 RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A,
580 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
581 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
582 PHYREGS(0x4008, 0x3C08, 0x3808, 0xF001, 0xF101, 0xF201),
583 },
584 { .channel = 56,
585 .freq = 5280, /* MHz */
586 .unk2 = 3520,
587 RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A,
588 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
589 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
590 PHYREGS(0x4408, 0x4008, 0x3C08, 0xF001, 0xF001, 0xF101),
591 },
592 { .channel = 58,
593 .freq = 5290, /* MHz */
594 .unk2 = 3527,
595 RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A,
596 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
597 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
598 PHYREGS(0x4808, 0x4408, 0x4008, 0xEF01, 0xF001, 0xF001),
599 },
600 { .channel = 60,
601 .freq = 5300, /* MHz */
602 .unk2 = 3533,
603 RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A,
604 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
605 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
606 PHYREGS(0x4C08, 0x4808, 0x4408, 0xEE01, 0xEF01, 0xF001),
607 },
608 { .channel = 62,
609 .freq = 5310, /* MHz */
610 .unk2 = 3540,
611 RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A,
612 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
613 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
614 PHYREGS(0x5008, 0x4C08, 0x4808, 0xED01, 0xEE01, 0xEF01),
615 },
616 { .channel = 64,
617 .freq = 5320, /* MHz */
618 .unk2 = 3547,
619 RADIOREGS(0x71, 0x02, 0x14, 0x09, 0x83, 0x01, 0x04, 0x0A,
620 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
621 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
622 PHYREGS(0x5408, 0x5008, 0x4C08, 0xEC01, 0xED01, 0xEE01),
623 },
624 { .channel = 66,
625 .freq = 5330, /* MHz */
626 .unk2 = 3553,
627 RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A,
628 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
629 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
630 PHYREGS(0x5808, 0x5408, 0x5008, 0xEB01, 0xEC01, 0xED01),
631 },
632 { .channel = 68,
633 .freq = 5340, /* MHz */
634 .unk2 = 3560,
635 RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A,
636 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
637 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
638 PHYREGS(0x5C08, 0x5808, 0x5408, 0xEA01, 0xEB01, 0xEC01),
639 },
640 { .channel = 70,
641 .freq = 5350, /* MHz */
642 .unk2 = 3567,
643 RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A,
644 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
645 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
646 PHYREGS(0x6008, 0x5C08, 0x5808, 0xE901, 0xEA01, 0xEB01),
647 },
648 { .channel = 72,
649 .freq = 5360, /* MHz */
650 .unk2 = 3573,
651 RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A,
652 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
653 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
654 PHYREGS(0x6408, 0x6008, 0x5C08, 0xE801, 0xE901, 0xEA01),
655 },
656 { .channel = 74,
657 .freq = 5370, /* MHz */
658 .unk2 = 3580,
659 RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A,
660 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
661 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
662 PHYREGS(0x6808, 0x6408, 0x6008, 0xE701, 0xE801, 0xE901),
663 },
664 { .channel = 76,
665 .freq = 5380, /* MHz */
666 .unk2 = 3587,
667 RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A,
668 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
669 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
670 PHYREGS(0x6C08, 0x6808, 0x6408, 0xE601, 0xE701, 0xE801),
671 },
672 { .channel = 78,
673 .freq = 5390, /* MHz */
674 .unk2 = 3593,
675 RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A,
676 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
677 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
678 PHYREGS(0x7008, 0x6C08, 0x6808, 0xE501, 0xE601, 0xE701),
679 },
680 { .channel = 80,
681 .freq = 5400, /* MHz */
682 .unk2 = 3600,
683 RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A,
684 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
685 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
686 PHYREGS(0x7408, 0x7008, 0x6C08, 0xE501, 0xE501, 0xE601),
687 },
688 { .channel = 82,
689 .freq = 5410, /* MHz */
690 .unk2 = 3607,
691 RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A,
692 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
693 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
694 PHYREGS(0x7808, 0x7408, 0x7008, 0xE401, 0xE501, 0xE501),
695 },
696 { .channel = 84,
697 .freq = 5420, /* MHz */
698 .unk2 = 3613,
699 RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A,
700 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
701 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
702 PHYREGS(0x7C08, 0x7808, 0x7408, 0xE301, 0xE401, 0xE501),
703 },
704 { .channel = 86,
705 .freq = 5430, /* MHz */
706 .unk2 = 3620,
707 RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A,
708 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
709 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
710 PHYREGS(0x8008, 0x7C08, 0x7808, 0xE201, 0xE301, 0xE401),
711 },
712 { .channel = 88,
713 .freq = 5440, /* MHz */
714 .unk2 = 3627,
715 RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A,
716 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
717 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
718 PHYREGS(0x8408, 0x8008, 0x7C08, 0xE101, 0xE201, 0xE301),
719 },
720 { .channel = 90,
721 .freq = 5450, /* MHz */
722 .unk2 = 3633,
723 RADIOREGS(0x71, 0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A,
724 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
725 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
726 PHYREGS(0x8808, 0x8408, 0x8008, 0xE001, 0xE101, 0xE201),
727 },
728 { .channel = 92,
729 .freq = 5460, /* MHz */
730 .unk2 = 3640,
731 RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A,
732 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
733 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
734 PHYREGS(0x8C08, 0x8808, 0x8408, 0xDF01, 0xE001, 0xE101),
735 },
736 { .channel = 94,
737 .freq = 5470, /* MHz */
738 .unk2 = 3647,
739 RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A,
740 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
741 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
742 PHYREGS(0x9008, 0x8C08, 0x8808, 0xDE01, 0xDF01, 0xE001),
743 },
744 { .channel = 96,
745 .freq = 5480, /* MHz */
746 .unk2 = 3653,
747 RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A,
748 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
749 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
750 PHYREGS(0x9408, 0x9008, 0x8C08, 0xDD01, 0xDE01, 0xDF01),
751 },
752 { .channel = 98,
753 .freq = 5490, /* MHz */
754 .unk2 = 3660,
755 RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A,
756 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
757 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
758 PHYREGS(0x9808, 0x9408, 0x9008, 0xDD01, 0xDD01, 0xDE01),
759 },
760 { .channel = 100,
761 .freq = 5500, /* MHz */
762 .unk2 = 3667,
763 RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A,
764 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
765 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
766 PHYREGS(0x9C08, 0x9808, 0x9408, 0xDC01, 0xDD01, 0xDD01),
767 },
768 { .channel = 102,
769 .freq = 5510, /* MHz */
770 .unk2 = 3673,
771 RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A,
772 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
773 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
774 PHYREGS(0xA008, 0x9C08, 0x9808, 0xDB01, 0xDC01, 0xDD01),
775 },
776 { .channel = 104,
777 .freq = 5520, /* MHz */
778 .unk2 = 3680,
779 RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A,
780 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
781 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
782 PHYREGS(0xA408, 0xA008, 0x9C08, 0xDA01, 0xDB01, 0xDC01),
783 },
784 { .channel = 106,
785 .freq = 5530, /* MHz */
786 .unk2 = 3687,
787 RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A,
788 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
789 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
790 PHYREGS(0xA808, 0xA408, 0xA008, 0xD901, 0xDA01, 0xDB01),
791 },
792 { .channel = 108,
793 .freq = 5540, /* MHz */
794 .unk2 = 3693,
795 RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A,
796 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
797 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
798 PHYREGS(0xAC08, 0xA808, 0xA408, 0xD801, 0xD901, 0xDA01),
799 },
800 { .channel = 110,
801 .freq = 5550, /* MHz */
802 .unk2 = 3700,
803 RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A,
804 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
805 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
806 PHYREGS(0xB008, 0xAC08, 0xA808, 0xD701, 0xD801, 0xD901),
807 },
808 { .channel = 112,
809 .freq = 5560, /* MHz */
810 .unk2 = 3707,
811 RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A,
812 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
813 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
814 PHYREGS(0xB408, 0xB008, 0xAC08, 0xD701, 0xD701, 0xD801),
815 },
816 { .channel = 114,
817 .freq = 5570, /* MHz */
818 .unk2 = 3713,
819 RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A,
820 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
821 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
822 PHYREGS(0xB808, 0xB408, 0xB008, 0xD601, 0xD701, 0xD701),
823 },
824 { .channel = 116,
825 .freq = 5580, /* MHz */
826 .unk2 = 3720,
827 RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A,
828 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
829 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
830 PHYREGS(0xBC08, 0xB808, 0xB408, 0xD501, 0xD601, 0xD701),
831 },
832 { .channel = 118,
833 .freq = 5590, /* MHz */
834 .unk2 = 3727,
835 RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A,
836 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
837 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
838 PHYREGS(0xC008, 0xBC08, 0xB808, 0xD401, 0xD501, 0xD601),
839 },
840 { .channel = 120,
841 .freq = 5600, /* MHz */
842 .unk2 = 3733,
843 RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A,
844 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
845 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
846 PHYREGS(0xC408, 0xC008, 0xBC08, 0xD301, 0xD401, 0xD501),
847 },
848 { .channel = 122,
849 .freq = 5610, /* MHz */
850 .unk2 = 3740,
851 RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A,
852 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
853 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
854 PHYREGS(0xC808, 0xC408, 0xC008, 0xD201, 0xD301, 0xD401),
855 },
856 { .channel = 124,
857 .freq = 5620, /* MHz */
858 .unk2 = 3747,
859 RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A,
860 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
861 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
862 PHYREGS(0xCC08, 0xC808, 0xC408, 0xD201, 0xD201, 0xD301),
863 },
864 { .channel = 126,
865 .freq = 5630, /* MHz */
866 .unk2 = 3753,
867 RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A,
868 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
869 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
870 PHYREGS(0xD008, 0xCC08, 0xC808, 0xD101, 0xD201, 0xD201),
871 },
872 { .channel = 128,
873 .freq = 5640, /* MHz */
874 .unk2 = 3760,
875 RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A,
876 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
877 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
878 PHYREGS(0xD408, 0xD008, 0xCC08, 0xD001, 0xD101, 0xD201),
879 },
880 { .channel = 130,
881 .freq = 5650, /* MHz */
882 .unk2 = 3767,
883 RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A,
884 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
885 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
886 PHYREGS(0xD808, 0xD408, 0xD008, 0xCF01, 0xD001, 0xD101),
887 },
888 { .channel = 132,
889 .freq = 5660, /* MHz */
890 .unk2 = 3773,
891 RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A,
892 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
893 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
894 PHYREGS(0xDC08, 0xD808, 0xD408, 0xCE01, 0xCF01, 0xD001),
895 },
896 { .channel = 134,
897 .freq = 5670, /* MHz */
898 .unk2 = 3780,
899 RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A,
900 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
901 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
902 PHYREGS(0xE008, 0xDC08, 0xD808, 0xCE01, 0xCE01, 0xCF01),
903 },
904 { .channel = 136,
905 .freq = 5680, /* MHz */
906 .unk2 = 3787,
907 RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A,
908 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
909 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
910 PHYREGS(0xE408, 0xE008, 0xDC08, 0xCD01, 0xCE01, 0xCE01),
911 },
912 { .channel = 138,
913 .freq = 5690, /* MHz */
914 .unk2 = 3793,
915 RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A,
916 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
917 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
918 PHYREGS(0xE808, 0xE408, 0xE008, 0xCC01, 0xCD01, 0xCE01),
919 },
920 { .channel = 140,
921 .freq = 5700, /* MHz */
922 .unk2 = 3800,
923 RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A,
924 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
925 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
926 PHYREGS(0xEC08, 0xE808, 0xE408, 0xCB01, 0xCC01, 0xCD01),
927 },
928 { .channel = 142,
929 .freq = 5710, /* MHz */
930 .unk2 = 3807,
931 RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A,
932 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
933 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
934 PHYREGS(0xF008, 0xEC08, 0xE808, 0xCA01, 0xCB01, 0xCC01),
935 },
936 { .channel = 144,
937 .freq = 5720, /* MHz */
938 .unk2 = 3813,
939 RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A,
940 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
941 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
942 PHYREGS(0xF408, 0xF008, 0xEC08, 0xC901, 0xCA01, 0xCB01),
943 },
944 { .channel = 145,
945 .freq = 5725, /* MHz */
946 .unk2 = 3817,
947 RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14,
948 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
949 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
950 PHYREGS(0xF608, 0xF208, 0xEE08, 0xC901, 0xCA01, 0xCB01),
951 },
952 { .channel = 146,
953 .freq = 5730, /* MHz */
954 .unk2 = 3820,
955 RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A,
956 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
957 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
958 PHYREGS(0xF808, 0xF408, 0xF008, 0xC901, 0xC901, 0xCA01),
959 },
960 { .channel = 147,
961 .freq = 5735, /* MHz */
962 .unk2 = 3823,
963 RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14,
964 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
965 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
966 PHYREGS(0xFA08, 0xF608, 0xF208, 0xC801, 0xC901, 0xCA01),
967 },
968 { .channel = 148,
969 .freq = 5740, /* MHz */
970 .unk2 = 3827,
971 RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A,
972 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
973 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
974 PHYREGS(0xFC08, 0xF808, 0xF408, 0xC801, 0xC901, 0xC901),
975 },
976 { .channel = 149,
977 .freq = 5745, /* MHz */
978 .unk2 = 3830,
979 RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14,
980 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
981 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
982 PHYREGS(0xFE08, 0xFA08, 0xF608, 0xC801, 0xC801, 0xC901),
983 },
984 { .channel = 150,
985 .freq = 5750, /* MHz */
986 .unk2 = 3833,
987 RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A,
988 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
989 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
990 PHYREGS(0x0009, 0xFC08, 0xF808, 0xC701, 0xC801, 0xC901),
991 },
992 { .channel = 151,
993 .freq = 5755, /* MHz */
994 .unk2 = 3837,
995 RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14,
996 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
997 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
998 PHYREGS(0x0209, 0xFE08, 0xFA08, 0xC701, 0xC801, 0xC801),
999 },
1000 { .channel = 152,
1001 .freq = 5760, /* MHz */
1002 .unk2 = 3840,
1003 RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1004 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1005 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1006 PHYREGS(0x0409, 0x0009, 0xFC08, 0xC601, 0xC701, 0xC801),
1007 },
1008 { .channel = 153,
1009 .freq = 5765, /* MHz */
1010 .unk2 = 3843,
1011 RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14,
1012 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1013 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1014 PHYREGS(0x0609, 0x0209, 0xFE08, 0xC601, 0xC701, 0xC801),
1015 },
1016 { .channel = 154,
1017 .freq = 5770, /* MHz */
1018 .unk2 = 3847,
1019 RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1020 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1021 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1022 PHYREGS(0x0809, 0x0409, 0x0009, 0xC601, 0xC601, 0xC701),
1023 },
1024 { .channel = 155,
1025 .freq = 5775, /* MHz */
1026 .unk2 = 3850,
1027 RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14,
1028 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1030 PHYREGS(0x0A09, 0x0609, 0x0209, 0xC501, 0xC601, 0xC701),
1031 },
1032 { .channel = 156,
1033 .freq = 5780, /* MHz */
1034 .unk2 = 3853,
1035 RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1036 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1037 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1038 PHYREGS(0x0C09, 0x0809, 0x0409, 0xC501, 0xC601, 0xC601),
1039 },
1040 { .channel = 157,
1041 .freq = 5785, /* MHz */
1042 .unk2 = 3857,
1043 RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14,
1044 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1046 PHYREGS(0x0E09, 0x0A09, 0x0609, 0xC401, 0xC501, 0xC601),
1047 },
1048 { .channel = 158,
1049 .freq = 5790, /* MHz */
1050 .unk2 = 3860,
1051 RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1052 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1053 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1054 PHYREGS(0x1009, 0x0C09, 0x0809, 0xC401, 0xC501, 0xC601),
1055 },
1056 { .channel = 159,
1057 .freq = 5795, /* MHz */
1058 .unk2 = 3863,
1059 RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14,
1060 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1062 PHYREGS(0x1209, 0x0E09, 0x0A09, 0xC401, 0xC401, 0xC501),
1063 },
1064 { .channel = 160,
1065 .freq = 5800, /* MHz */
1066 .unk2 = 3867,
1067 RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1068 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1069 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1070 PHYREGS(0x1409, 0x1009, 0x0C09, 0xC301, 0xC401, 0xC501),
1071 },
1072 { .channel = 161,
1073 .freq = 5805, /* MHz */
1074 .unk2 = 3870,
1075 RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14,
1076 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1077 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1078 PHYREGS(0x1609, 0x1209, 0x0E09, 0xC301, 0xC401, 0xC401),
1079 },
1080 { .channel = 162,
1081 .freq = 5810, /* MHz */
1082 .unk2 = 3873,
1083 RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1084 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1085 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1086 PHYREGS(0x1809, 0x1409, 0x1009, 0xC201, 0xC301, 0xC401),
1087 },
1088 { .channel = 163,
1089 .freq = 5815, /* MHz */
1090 .unk2 = 3877,
1091 RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14,
1092 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1093 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1094 PHYREGS(0x1A09, 0x1609, 0x1209, 0xC201, 0xC301, 0xC401),
1095 },
1096 { .channel = 164,
1097 .freq = 5820, /* MHz */
1098 .unk2 = 3880,
1099 RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1100 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1101 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1102 PHYREGS(0x1C09, 0x1809, 0x1409, 0xC201, 0xC201, 0xC301),
1103 },
1104 { .channel = 165,
1105 .freq = 5825, /* MHz */
1106 .unk2 = 3883,
1107 RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14,
1108 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1109 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1110 PHYREGS(0x1E09, 0x1A09, 0x1609, 0xC101, 0xC201, 0xC301),
1111 },
1112 { .channel = 166,
1113 .freq = 5830, /* MHz */
1114 .unk2 = 3887,
1115 RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1116 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1117 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1118 PHYREGS(0x2009, 0x1C09, 0x1809, 0xC101, 0xC201, 0xC201),
1119 },
1120 { .channel = 168,
1121 .freq = 5840, /* MHz */
1122 .unk2 = 3893,
1123 RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1124 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1125 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1126 PHYREGS(0x2409, 0x2009, 0x1C09, 0xC001, 0xC101, 0xC201),
1127 },
1128 { .channel = 170,
1129 .freq = 5850, /* MHz */
1130 .unk2 = 3900,
1131 RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A,
1132 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1133 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1134 PHYREGS(0x2809, 0x2409, 0x2009, 0xBF01, 0xC001, 0xC101),
1135 },
1136 { .channel = 172,
1137 .freq = 5860, /* MHz */
1138 .unk2 = 3907,
1139 RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A,
1140 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1141 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1142 PHYREGS(0x2C09, 0x2809, 0x2409, 0xBF01, 0xBF01, 0xC001),
1143 },
1144 { .channel = 174,
1145 .freq = 5870, /* MHz */
1146 .unk2 = 3913,
1147 RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A,
1148 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1149 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1150 PHYREGS(0x3009, 0x2C09, 0x2809, 0xBE01, 0xBF01, 0xBF01),
1151 },
1152 { .channel = 176,
1153 .freq = 5880, /* MHz */
1154 .unk2 = 3920,
1155 RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A,
1156 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1157 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1158 PHYREGS(0x3409, 0x3009, 0x2C09, 0xBD01, 0xBE01, 0xBF01),
1159 },
1160 { .channel = 178,
1161 .freq = 5890, /* MHz */
1162 .unk2 = 3927,
1163 RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1164 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1165 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1166 PHYREGS(0x3809, 0x3409, 0x3009, 0xBC01, 0xBD01, 0xBE01),
1167 },
1168 { .channel = 180,
1169 .freq = 5900, /* MHz */
1170 .unk2 = 3933,
1171 RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A,
1172 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1173 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1174 PHYREGS(0x3C09, 0x3809, 0x3409, 0xBC01, 0xBC01, 0xBD01),
1175 },
1176 { .channel = 182,
1177 .freq = 5910, /* MHz */
1178 .unk2 = 3940,
1179 RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1180 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1181 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1182 PHYREGS(0x4009, 0x3C09, 0x3809, 0xBB01, 0xBC01, 0xBC01),
1183 },
1184 { .channel = 1,
1185 .freq = 2412, /* MHz */
1186 .unk2 = 3216,
1187 RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15,
1188 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C,
1189 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80),
1190 PHYREGS(0xC903, 0xC503, 0xC103, 0x3A04, 0x3F04, 0x4304),
1191 },
1192 { .channel = 2,
1193 .freq = 2417, /* MHz */
1194 .unk2 = 3223,
1195 RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15,
1196 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B,
1197 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80),
1198 PHYREGS(0xCB03, 0xC703, 0xC303, 0x3804, 0x3D04, 0x4104),
1199 },
1200 { .channel = 3,
1201 .freq = 2422, /* MHz */
1202 .unk2 = 3229,
1203 RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15,
1204 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1205 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1206 PHYREGS(0xCD03, 0xC903, 0xC503, 0x3604, 0x3A04, 0x3F04),
1207 },
1208 { .channel = 4,
1209 .freq = 2427, /* MHz */
1210 .unk2 = 3236,
1211 RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15,
1212 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1213 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1214 PHYREGS(0xCF03, 0xCB03, 0xC703, 0x3404, 0x3804, 0x3D04),
1215 },
1216 { .channel = 5,
1217 .freq = 2432, /* MHz */
1218 .unk2 = 3243,
1219 RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15,
1220 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09,
1221 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80),
1222 PHYREGS(0xD103, 0xCD03, 0xC903, 0x3104, 0x3604, 0x3A04),
1223 },
1224 { .channel = 6,
1225 .freq = 2437, /* MHz */
1226 .unk2 = 3249,
1227 RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15,
1228 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08,
1229 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80),
1230 PHYREGS(0xD303, 0xCF03, 0xCB03, 0x2F04, 0x3404, 0x3804),
1231 },
1232 { .channel = 7,
1233 .freq = 2442, /* MHz */
1234 .unk2 = 3256,
1235 RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15,
1236 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07,
1237 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80),
1238 PHYREGS(0xD503, 0xD103, 0xCD03, 0x2D04, 0x3104, 0x3604),
1239 },
1240 { .channel = 8,
1241 .freq = 2447, /* MHz */
1242 .unk2 = 3263,
1243 RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15,
1244 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06,
1245 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80),
1246 PHYREGS(0xD703, 0xD303, 0xCF03, 0x2B04, 0x2F04, 0x3404),
1247 },
1248 { .channel = 9,
1249 .freq = 2452, /* MHz */
1250 .unk2 = 3269,
1251 RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15,
1252 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06,
1253 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80),
1254 PHYREGS(0xD903, 0xD503, 0xD103, 0x2904, 0x2D04, 0x3104),
1255 },
1256 { .channel = 10,
1257 .freq = 2457, /* MHz */
1258 .unk2 = 3276,
1259 RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15,
1260 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05,
1261 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80),
1262 PHYREGS(0xDB03, 0xD703, 0xD303, 0x2704, 0x2B04, 0x2F04),
1263 },
1264 { .channel = 11,
1265 .freq = 2462, /* MHz */
1266 .unk2 = 3283,
1267 RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15,
1268 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04,
1269 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80),
1270 PHYREGS(0xDD03, 0xD903, 0xD503, 0x2404, 0x2904, 0x2D04),
1271 },
1272 { .channel = 12,
1273 .freq = 2467, /* MHz */
1274 .unk2 = 3289,
1275 RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15,
1276 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03,
1277 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80),
1278 PHYREGS(0xDF03, 0xDB03, 0xD703, 0x2204, 0x2704, 0x2B04),
1279 },
1280 { .channel = 13,
1281 .freq = 2472, /* MHz */
1282 .unk2 = 3296,
1283 RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15,
1284 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03,
1285 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80),
1286 PHYREGS(0xE103, 0xDD03, 0xD903, 0x2004, 0x2404, 0x2904),
1287 },
1288 { .channel = 14,
1289 .freq = 2484, /* MHz */
1290 .unk2 = 3312,
1291 RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15,
1292 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01,
1293 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80),
1294 PHYREGS(0xE603, 0xE203, 0xDE03, 0x1B04, 0x1F04, 0x2404),
1295 },
1296};
1297
1298void b2055_upload_inittab(struct b43_wldev *dev,
1299 bool ghz5, bool ignore_uploadflag)
1300{
1301 const struct b2055_inittab_entry *e;
1302 unsigned int i;
1303 u16 value;
1304
1305 for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
1306 e = &(b2055_inittab[i]);
1307 if (!(e->flags & B2055_INITTAB_ENTRY_OK))
1308 continue;
1309 if ((e->flags & B2055_INITTAB_UPLOAD) || ignore_uploadflag) {
1310 if (ghz5)
1311 value = e->ghz5;
1312 else
1313 value = e->ghz2;
1314 b43_radio_write16(dev, i, value);
1315 }
1316 }
1317}
1318
1319const struct b43_nphy_channeltab_entry_rev2 *
1320b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
1321{
1322 const struct b43_nphy_channeltab_entry_rev2 *e;
1323 unsigned int i;
1324
1325 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab_rev2); i++) {
1326 e = &(b43_nphy_channeltab_rev2[i]);
1327 if (e->channel == channel)
1328 return e;
1329 }
1330
1331 return NULL;
1332}
diff --git a/drivers/net/wireless/b43/radio_2055.h b/drivers/net/wireless/b43/radio_2055.h
new file mode 100644
index 000000000000..d9bfa0f21b72
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2055.h
@@ -0,0 +1,254 @@
1#ifndef B43_RADIO_2055_H_
2#define B43_RADIO_2055_H_
3
4#include <linux/types.h>
5
6#include "tables_nphy.h"
7
8#define B2055_GEN_SPARE 0x00 /* GEN spare */
9#define B2055_SP_PINPD 0x02 /* SP PIN PD */
10#define B2055_C1_SP_RSSI 0x03 /* SP RSSI Core 1 */
11#define B2055_C1_SP_PDMISC 0x04 /* SP PD MISC Core 1 */
12#define B2055_C2_SP_RSSI 0x05 /* SP RSSI Core 2 */
13#define B2055_C2_SP_PDMISC 0x06 /* SP PD MISC Core 2 */
14#define B2055_C1_SP_RXGC1 0x07 /* SP RX GC1 Core 1 */
15#define B2055_C1_SP_RXGC2 0x08 /* SP RX GC2 Core 1 */
16#define B2055_C2_SP_RXGC1 0x09 /* SP RX GC1 Core 2 */
17#define B2055_C2_SP_RXGC2 0x0A /* SP RX GC2 Core 2 */
18#define B2055_C1_SP_LPFBWSEL 0x0B /* SP LPF BW select Core 1 */
19#define B2055_C2_SP_LPFBWSEL 0x0C /* SP LPF BW select Core 2 */
20#define B2055_C1_SP_TXGC1 0x0D /* SP TX GC1 Core 1 */
21#define B2055_C1_SP_TXGC2 0x0E /* SP TX GC2 Core 1 */
22#define B2055_C2_SP_TXGC1 0x0F /* SP TX GC1 Core 2 */
23#define B2055_C2_SP_TXGC2 0x10 /* SP TX GC2 Core 2 */
24#define B2055_MASTER1 0x11 /* Master control 1 */
25#define B2055_MASTER2 0x12 /* Master control 2 */
26#define B2055_PD_LGEN 0x13 /* PD LGEN */
27#define B2055_PD_PLLTS 0x14 /* PD PLL TS */
28#define B2055_C1_PD_LGBUF 0x15 /* PD Core 1 LGBUF */
29#define B2055_C1_PD_TX 0x16 /* PD Core 1 TX */
30#define B2055_C1_PD_RXTX 0x17 /* PD Core 1 RXTX */
31#define B2055_C1_PD_RSSIMISC 0x18 /* PD Core 1 RSSI MISC */
32#define B2055_C2_PD_LGBUF 0x19 /* PD Core 2 LGBUF */
33#define B2055_C2_PD_TX 0x1A /* PD Core 2 TX */
34#define B2055_C2_PD_RXTX 0x1B /* PD Core 2 RXTX */
35#define B2055_C2_PD_RSSIMISC 0x1C /* PD Core 2 RSSI MISC */
36#define B2055_PWRDET_LGEN 0x1D /* PWRDET LGEN */
37#define B2055_C1_PWRDET_LGBUF 0x1E /* PWRDET LGBUF Core 1 */
38#define B2055_C1_PWRDET_RXTX 0x1F /* PWRDET RXTX Core 1 */
39#define B2055_C2_PWRDET_LGBUF 0x20 /* PWRDET LGBUF Core 2 */
40#define B2055_C2_PWRDET_RXTX 0x21 /* PWRDET RXTX Core 2 */
41#define B2055_RRCCAL_CS 0x22 /* RRCCAL Control spare */
42#define B2055_RRCCAL_NOPTSEL 0x23 /* RRCCAL N OPT SEL */
43#define B2055_CAL_MISC 0x24 /* CAL MISC */
44#define B2055_CAL_COUT 0x25 /* CAL Counter out */
45#define B2055_CAL_COUT2 0x26 /* CAL Counter out 2 */
46#define B2055_CAL_CVARCTL 0x27 /* CAL CVAR Control */
47#define B2055_CAL_RVARCTL 0x28 /* CAL RVAR Control */
48#define B2055_CAL_LPOCTL 0x29 /* CAL LPO Control */
49#define B2055_CAL_TS 0x2A /* CAL TS */
50#define B2055_CAL_RCCALRTS 0x2B /* CAL RCCAL READ TS */
51#define B2055_CAL_RCALRTS 0x2C /* CAL RCAL READ TS */
52#define B2055_PADDRV 0x2D /* PAD driver */
53#define B2055_XOCTL1 0x2E /* XO Control 1 */
54#define B2055_XOCTL2 0x2F /* XO Control 2 */
55#define B2055_XOREGUL 0x30 /* XO Regulator */
56#define B2055_XOMISC 0x31 /* XO misc */
57#define B2055_PLL_LFC1 0x32 /* PLL LF C1 */
58#define B2055_PLL_CALVTH 0x33 /* PLL CAL VTH */
59#define B2055_PLL_LFC2 0x34 /* PLL LF C2 */
60#define B2055_PLL_REF 0x35 /* PLL reference */
61#define B2055_PLL_LFR1 0x36 /* PLL LF R1 */
62#define B2055_PLL_PFDCP 0x37 /* PLL PFD CP */
63#define B2055_PLL_IDAC_CPOPAMP 0x38 /* PLL IDAC CPOPAMP */
64#define B2055_PLL_CPREG 0x39 /* PLL CP Regulator */
65#define B2055_PLL_RCAL 0x3A /* PLL RCAL */
66#define B2055_RF_PLLMOD0 0x3B /* RF PLL MOD0 */
67#define B2055_RF_PLLMOD1 0x3C /* RF PLL MOD1 */
68#define B2055_RF_MMDIDAC1 0x3D /* RF MMD IDAC 1 */
69#define B2055_RF_MMDIDAC0 0x3E /* RF MMD IDAC 0 */
70#define B2055_RF_MMDSP 0x3F /* RF MMD spare */
71#define B2055_VCO_CAL1 0x40 /* VCO cal 1 */
72#define B2055_VCO_CAL2 0x41 /* VCO cal 2 */
73#define B2055_VCO_CAL3 0x42 /* VCO cal 3 */
74#define B2055_VCO_CAL4 0x43 /* VCO cal 4 */
75#define B2055_VCO_CAL5 0x44 /* VCO cal 5 */
76#define B2055_VCO_CAL6 0x45 /* VCO cal 6 */
77#define B2055_VCO_CAL7 0x46 /* VCO cal 7 */
78#define B2055_VCO_CAL8 0x47 /* VCO cal 8 */
79#define B2055_VCO_CAL9 0x48 /* VCO cal 9 */
80#define B2055_VCO_CAL10 0x49 /* VCO cal 10 */
81#define B2055_VCO_CAL11 0x4A /* VCO cal 11 */
82#define B2055_VCO_CAL12 0x4B /* VCO cal 12 */
83#define B2055_VCO_CAL13 0x4C /* VCO cal 13 */
84#define B2055_VCO_CAL14 0x4D /* VCO cal 14 */
85#define B2055_VCO_CAL15 0x4E /* VCO cal 15 */
86#define B2055_VCO_CAL16 0x4F /* VCO cal 16 */
87#define B2055_VCO_KVCO 0x50 /* VCO KVCO */
88#define B2055_VCO_CAPTAIL 0x51 /* VCO CAP TAIL */
89#define B2055_VCO_IDACVCO 0x52 /* VCO IDAC VCO */
90#define B2055_VCO_REG 0x53 /* VCO Regulator */
91#define B2055_PLL_RFVTH 0x54 /* PLL RF VTH */
92#define B2055_LGBUF_CENBUF 0x55 /* LGBUF CEN BUF */
93#define B2055_LGEN_TUNE1 0x56 /* LGEN tune 1 */
94#define B2055_LGEN_TUNE2 0x57 /* LGEN tune 2 */
95#define B2055_LGEN_IDAC1 0x58 /* LGEN IDAC 1 */
96#define B2055_LGEN_IDAC2 0x59 /* LGEN IDAC 2 */
97#define B2055_LGEN_BIASC 0x5A /* LGEN BIAS counter */
98#define B2055_LGEN_BIASIDAC 0x5B /* LGEN BIAS IDAC */
99#define B2055_LGEN_RCAL 0x5C /* LGEN RCAL */
100#define B2055_LGEN_DIV 0x5D /* LGEN div */
101#define B2055_LGEN_SPARE2 0x5E /* LGEN spare 2 */
102#define B2055_C1_LGBUF_ATUNE 0x5F /* Core 1 LGBUF A tune */
103#define B2055_C1_LGBUF_GTUNE 0x60 /* Core 1 LGBUF G tune */
104#define B2055_C1_LGBUF_DIV 0x61 /* Core 1 LGBUF div */
105#define B2055_C1_LGBUF_AIDAC 0x62 /* Core 1 LGBUF A IDAC */
106#define B2055_C1_LGBUF_GIDAC 0x63 /* Core 1 LGBUF G IDAC */
107#define B2055_C1_LGBUF_IDACFO 0x64 /* Core 1 LGBUF IDAC filter override */
108#define B2055_C1_LGBUF_SPARE 0x65 /* Core 1 LGBUF spare */
109#define B2055_C1_RX_RFSPC1 0x66 /* Core 1 RX RF SPC1 */
110#define B2055_C1_RX_RFR1 0x67 /* Core 1 RX RF reg 1 */
111#define B2055_C1_RX_RFR2 0x68 /* Core 1 RX RF reg 2 */
112#define B2055_C1_RX_RFRCAL 0x69 /* Core 1 RX RF RCAL */
113#define B2055_C1_RX_BB_BLCMP 0x6A /* Core 1 RX Baseband BUFI LPF CMP */
114#define B2055_C1_RX_BB_LPF 0x6B /* Core 1 RX Baseband LPF */
115#define B2055_C1_RX_BB_MIDACHP 0x6C /* Core 1 RX Baseband MIDAC High-pass */
116#define B2055_C1_RX_BB_VGA1IDAC 0x6D /* Core 1 RX Baseband VGA1 IDAC */
117#define B2055_C1_RX_BB_VGA2IDAC 0x6E /* Core 1 RX Baseband VGA2 IDAC */
118#define B2055_C1_RX_BB_VGA3IDAC 0x6F /* Core 1 RX Baseband VGA3 IDAC */
119#define B2055_C1_RX_BB_BUFOCTL 0x70 /* Core 1 RX Baseband BUFO Control */
120#define B2055_C1_RX_BB_RCCALCTL 0x71 /* Core 1 RX Baseband RCCAL Control */
121#define B2055_C1_RX_BB_RSSICTL1 0x72 /* Core 1 RX Baseband RSSI Control 1 */
122#define B2055_C1_RX_BB_RSSICTL2 0x73 /* Core 1 RX Baseband RSSI Control 2 */
123#define B2055_C1_RX_BB_RSSICTL3 0x74 /* Core 1 RX Baseband RSSI Control 3 */
124#define B2055_C1_RX_BB_RSSICTL4 0x75 /* Core 1 RX Baseband RSSI Control 4 */
125#define B2055_C1_RX_BB_RSSICTL5 0x76 /* Core 1 RX Baseband RSSI Control 5 */
126#define B2055_C1_RX_BB_REG 0x77 /* Core 1 RX Baseband Regulator */
127#define B2055_C1_RX_BB_SPARE1 0x78 /* Core 1 RX Baseband spare 1 */
128#define B2055_C1_RX_TXBBRCAL 0x79 /* Core 1 RX TX BB RCAL */
129#define B2055_C1_TX_RF_SPGA 0x7A /* Core 1 TX RF SGM PGA */
130#define B2055_C1_TX_RF_SPAD 0x7B /* Core 1 TX RF SGM PAD */
131#define B2055_C1_TX_RF_CNTPGA1 0x7C /* Core 1 TX RF counter PGA 1 */
132#define B2055_C1_TX_RF_CNTPAD1 0x7D /* Core 1 TX RF counter PAD 1 */
133#define B2055_C1_TX_RF_PGAIDAC 0x7E /* Core 1 TX RF PGA IDAC */
134#define B2055_C1_TX_PGAPADTN 0x7F /* Core 1 TX PGA PAD TN */
135#define B2055_C1_TX_PADIDAC1 0x80 /* Core 1 TX PAD IDAC 1 */
136#define B2055_C1_TX_PADIDAC2 0x81 /* Core 1 TX PAD IDAC 2 */
137#define B2055_C1_TX_MXBGTRIM 0x82 /* Core 1 TX MX B/G TRIM */
138#define B2055_C1_TX_RF_RCAL 0x83 /* Core 1 TX RF RCAL */
139#define B2055_C1_TX_RF_PADTSSI1 0x84 /* Core 1 TX RF PAD TSSI1 */
140#define B2055_C1_TX_RF_PADTSSI2 0x85 /* Core 1 TX RF PAD TSSI2 */
141#define B2055_C1_TX_RF_SPARE 0x86 /* Core 1 TX RF spare */
142#define B2055_C1_TX_RF_IQCAL1 0x87 /* Core 1 TX RF I/Q CAL 1 */
143#define B2055_C1_TX_RF_IQCAL2 0x88 /* Core 1 TX RF I/Q CAL 2 */
144#define B2055_C1_TXBB_RCCAL 0x89 /* Core 1 TXBB RC CAL Control */
145#define B2055_C1_TXBB_LPF1 0x8A /* Core 1 TXBB LPF 1 */
146#define B2055_C1_TX_VOSCNCL 0x8B /* Core 1 TX VOS CNCL */
147#define B2055_C1_TX_LPF_MXGMIDAC 0x8C /* Core 1 TX LPF MXGM IDAC */
148#define B2055_C1_TX_BB_MXGM 0x8D /* Core 1 TX BB MXGM */
149#define B2055_C2_LGBUF_ATUNE 0x8E /* Core 2 LGBUF A tune */
150#define B2055_C2_LGBUF_GTUNE 0x8F /* Core 2 LGBUF G tune */
151#define B2055_C2_LGBUF_DIV 0x90 /* Core 2 LGBUF div */
152#define B2055_C2_LGBUF_AIDAC 0x91 /* Core 2 LGBUF A IDAC */
153#define B2055_C2_LGBUF_GIDAC 0x92 /* Core 2 LGBUF G IDAC */
154#define B2055_C2_LGBUF_IDACFO 0x93 /* Core 2 LGBUF IDAC filter override */
155#define B2055_C2_LGBUF_SPARE 0x94 /* Core 2 LGBUF spare */
156#define B2055_C2_RX_RFSPC1 0x95 /* Core 2 RX RF SPC1 */
157#define B2055_C2_RX_RFR1 0x96 /* Core 2 RX RF reg 1 */
158#define B2055_C2_RX_RFR2 0x97 /* Core 2 RX RF reg 2 */
159#define B2055_C2_RX_RFRCAL 0x98 /* Core 2 RX RF RCAL */
160#define B2055_C2_RX_BB_BLCMP 0x99 /* Core 2 RX Baseband BUFI LPF CMP */
161#define B2055_C2_RX_BB_LPF 0x9A /* Core 2 RX Baseband LPF */
162#define B2055_C2_RX_BB_MIDACHP 0x9B /* Core 2 RX Baseband MIDAC High-pass */
163#define B2055_C2_RX_BB_VGA1IDAC 0x9C /* Core 2 RX Baseband VGA1 IDAC */
164#define B2055_C2_RX_BB_VGA2IDAC 0x9D /* Core 2 RX Baseband VGA2 IDAC */
165#define B2055_C2_RX_BB_VGA3IDAC 0x9E /* Core 2 RX Baseband VGA3 IDAC */
166#define B2055_C2_RX_BB_BUFOCTL 0x9F /* Core 2 RX Baseband BUFO Control */
167#define B2055_C2_RX_BB_RCCALCTL 0xA0 /* Core 2 RX Baseband RCCAL Control */
168#define B2055_C2_RX_BB_RSSICTL1 0xA1 /* Core 2 RX Baseband RSSI Control 1 */
169#define B2055_C2_RX_BB_RSSICTL2 0xA2 /* Core 2 RX Baseband RSSI Control 2 */
170#define B2055_C2_RX_BB_RSSICTL3 0xA3 /* Core 2 RX Baseband RSSI Control 3 */
171#define B2055_C2_RX_BB_RSSICTL4 0xA4 /* Core 2 RX Baseband RSSI Control 4 */
172#define B2055_C2_RX_BB_RSSICTL5 0xA5 /* Core 2 RX Baseband RSSI Control 5 */
173#define B2055_C2_RX_BB_REG 0xA6 /* Core 2 RX Baseband Regulator */
174#define B2055_C2_RX_BB_SPARE1 0xA7 /* Core 2 RX Baseband spare 1 */
175#define B2055_C2_RX_TXBBRCAL 0xA8 /* Core 2 RX TX BB RCAL */
176#define B2055_C2_TX_RF_SPGA 0xA9 /* Core 2 TX RF SGM PGA */
177#define B2055_C2_TX_RF_SPAD 0xAA /* Core 2 TX RF SGM PAD */
178#define B2055_C2_TX_RF_CNTPGA1 0xAB /* Core 2 TX RF counter PGA 1 */
179#define B2055_C2_TX_RF_CNTPAD1 0xAC /* Core 2 TX RF counter PAD 1 */
180#define B2055_C2_TX_RF_PGAIDAC 0xAD /* Core 2 TX RF PGA IDAC */
181#define B2055_C2_TX_PGAPADTN 0xAE /* Core 2 TX PGA PAD TN */
182#define B2055_C2_TX_PADIDAC1 0xAF /* Core 2 TX PAD IDAC 1 */
183#define B2055_C2_TX_PADIDAC2 0xB0 /* Core 2 TX PAD IDAC 2 */
184#define B2055_C2_TX_MXBGTRIM 0xB1 /* Core 2 TX MX B/G TRIM */
185#define B2055_C2_TX_RF_RCAL 0xB2 /* Core 2 TX RF RCAL */
186#define B2055_C2_TX_RF_PADTSSI1 0xB3 /* Core 2 TX RF PAD TSSI1 */
187#define B2055_C2_TX_RF_PADTSSI2 0xB4 /* Core 2 TX RF PAD TSSI2 */
188#define B2055_C2_TX_RF_SPARE 0xB5 /* Core 2 TX RF spare */
189#define B2055_C2_TX_RF_IQCAL1 0xB6 /* Core 2 TX RF I/Q CAL 1 */
190#define B2055_C2_TX_RF_IQCAL2 0xB7 /* Core 2 TX RF I/Q CAL 2 */
191#define B2055_C2_TXBB_RCCAL 0xB8 /* Core 2 TXBB RC CAL Control */
192#define B2055_C2_TXBB_LPF1 0xB9 /* Core 2 TXBB LPF 1 */
193#define B2055_C2_TX_VOSCNCL 0xBA /* Core 2 TX VOS CNCL */
194#define B2055_C2_TX_LPF_MXGMIDAC 0xBB /* Core 2 TX LPF MXGM IDAC */
195#define B2055_C2_TX_BB_MXGM 0xBC /* Core 2 TX BB MXGM */
196#define B2055_PRG_GCHP21 0xBD /* PRG GC HPVGA23 21 */
197#define B2055_PRG_GCHP22 0xBE /* PRG GC HPVGA23 22 */
198#define B2055_PRG_GCHP23 0xBF /* PRG GC HPVGA23 23 */
199#define B2055_PRG_GCHP24 0xC0 /* PRG GC HPVGA23 24 */
200#define B2055_PRG_GCHP25 0xC1 /* PRG GC HPVGA23 25 */
201#define B2055_PRG_GCHP26 0xC2 /* PRG GC HPVGA23 26 */
202#define B2055_PRG_GCHP27 0xC3 /* PRG GC HPVGA23 27 */
203#define B2055_PRG_GCHP28 0xC4 /* PRG GC HPVGA23 28 */
204#define B2055_PRG_GCHP29 0xC5 /* PRG GC HPVGA23 29 */
205#define B2055_PRG_GCHP30 0xC6 /* PRG GC HPVGA23 30 */
206#define B2055_C1_LNA_GAINBST 0xCD /* Core 1 LNA GAINBST */
207#define B2055_C1_B0NB_RSSIVCM 0xD2 /* Core 1 B0 narrow-band RSSI VCM */
208#define B2055_C1_GENSPARE2 0xD6 /* Core 1 GEN spare 2 */
209#define B2055_C2_LNA_GAINBST 0xD9 /* Core 2 LNA GAINBST */
210#define B2055_C2_B0NB_RSSIVCM 0xDE /* Core 2 B0 narrow-band RSSI VCM */
211#define B2055_C2_GENSPARE2 0xE2 /* Core 2 GEN spare 2 */
212
213struct b43_nphy_channeltab_entry_rev2 {
214 /* The channel number */
215 u8 channel;
216 /* The channel frequency in MHz */
217 u16 freq;
218 /* An unknown value */
219 u16 unk2;
220 /* Radio register values on channelswitch */
221 u8 radio_pll_ref;
222 u8 radio_rf_pllmod0;
223 u8 radio_rf_pllmod1;
224 u8 radio_vco_captail;
225 u8 radio_vco_cal1;
226 u8 radio_vco_cal2;
227 u8 radio_pll_lfc1;
228 u8 radio_pll_lfr1;
229 u8 radio_pll_lfc2;
230 u8 radio_lgbuf_cenbuf;
231 u8 radio_lgen_tune1;
232 u8 radio_lgen_tune2;
233 u8 radio_c1_lgbuf_atune;
234 u8 radio_c1_lgbuf_gtune;
235 u8 radio_c1_rx_rfr1;
236 u8 radio_c1_tx_pgapadtn;
237 u8 radio_c1_tx_mxbgtrim;
238 u8 radio_c2_lgbuf_atune;
239 u8 radio_c2_lgbuf_gtune;
240 u8 radio_c2_rx_rfr1;
241 u8 radio_c2_tx_pgapadtn;
242 u8 radio_c2_tx_mxbgtrim;
243 /* PHY register values on channelswitch */
244 struct b43_phy_n_sfo_cfg phy_regs;
245};
246
247/* Upload the default register value table.
248 * If "ghz5" is true, we upload the 5GHz table. Otherwise the 2.4GHz
249 * table is uploaded. If "ignore_uploadflag" is true, we upload any value
250 * and ignore the "UPLOAD" flag. */
251void b2055_upload_inittab(struct b43_wldev *dev,
252 bool ghz5, bool ignore_uploadflag);
253
254#endif /* B43_RADIO_2055_H_ */
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
new file mode 100644
index 000000000000..d8563192ce56
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -0,0 +1,43 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11n 2056 radio device data tables
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; see the file COPYING. If not, write to
18 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
19 Boston, MA 02110-1301, USA.
20
21*/
22
23#include "b43.h"
24#include "radio_2056.h"
25#include "phy_common.h"
26
27static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
28};
29
30const struct b43_nphy_channeltab_entry_rev3 *
31b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
32{
33 const struct b43_nphy_channeltab_entry_rev3 *e;
34 unsigned int i;
35
36 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab_rev3); i++) {
37 e = &(b43_nphy_channeltab_rev3[i]);
38 if (e->freq == freq)
39 return e;
40 }
41
42 return NULL;
43}
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
new file mode 100644
index 000000000000..fda6dafecb8c
--- /dev/null
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -0,0 +1,42 @@
1/*
2
3 Broadcom B43 wireless driver
4
5 Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING. If not, write to
19 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA.
21
22*/
23
24#ifndef B43_RADIO_2056_H_
25#define B43_RADIO_2056_H_
26
27#include <linux/types.h>
28
29#include "tables_nphy.h"
30
31struct b43_nphy_channeltab_entry_rev3 {
32 /* The channel number */
33 u8 channel;
34 /* The channel frequency in MHz */
35 u16 freq;
36 /* Radio register values on channelswitch */
37 /* TODO */
38 /* PHY register values on channelswitch */
39 struct b43_phy_n_sfo_cfg phy_regs;
40};
41
42#endif /* B43_RADIO_2056_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index d96e870ab8fe..d60db078eae2 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1,7 +1,7 @@
1/* 1/*
2 2
3 Broadcom B43 wireless driver 3 Broadcom B43 wireless driver
4 IEEE 802.11n PHY and radio device data tables 4 IEEE 802.11n PHY data tables
5 5
6 Copyright (c) 2008 Michael Buesch <mb@bu3sch.de> 6 Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
7 7
@@ -27,1315 +27,6 @@
27#include "phy_common.h" 27#include "phy_common.h"
28#include "phy_n.h" 28#include "phy_n.h"
29 29
30
31struct b2055_inittab_entry {
32 /* Value to write if we use the 5GHz band. */
33 u16 ghz5;
34 /* Value to write if we use the 2.4GHz band. */
35 u16 ghz2;
36 /* Flags */
37 u8 flags;
38#define B2055_INITTAB_ENTRY_OK 0x01
39#define B2055_INITTAB_UPLOAD 0x02
40};
41#define UPLOAD .flags = B2055_INITTAB_ENTRY_OK | B2055_INITTAB_UPLOAD
42#define NOUPLOAD .flags = B2055_INITTAB_ENTRY_OK
43
44static const struct b2055_inittab_entry b2055_inittab [] = {
45 [B2055_SP_PINPD] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
46 [B2055_C1_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
47 [B2055_C1_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, },
48 [B2055_C2_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
49 [B2055_C2_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, },
50 [B2055_C1_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, },
51 [B2055_C1_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
52 [B2055_C2_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, },
53 [B2055_C2_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
54 [B2055_C1_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
55 [B2055_C2_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
56 [B2055_C1_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, },
57 [B2055_C1_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
58 [B2055_C2_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, },
59 [B2055_C2_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
60 [B2055_MASTER1] = { .ghz5 = 0x00D0, .ghz2 = 0x00D0, NOUPLOAD, },
61 [B2055_MASTER2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
62 [B2055_PD_LGEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
63 [B2055_PD_PLLTS] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
64 [B2055_C1_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
65 [B2055_C1_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
66 [B2055_C1_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
67 [B2055_C1_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
68 [B2055_C2_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
69 [B2055_C2_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
70 [B2055_C2_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
71 [B2055_C2_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
72 [B2055_PWRDET_LGEN] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
73 [B2055_C1_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
74 [B2055_C1_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
75 [B2055_C2_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
76 [B2055_C2_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
77 [B2055_RRCCAL_CS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
78 [B2055_RRCCAL_NOPTSEL] = { .ghz5 = 0x002C, .ghz2 = 0x002C, NOUPLOAD, },
79 [B2055_CAL_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
80 [B2055_CAL_COUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
81 [B2055_CAL_COUT2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
82 [B2055_CAL_CVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
83 [B2055_CAL_RVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
84 [B2055_CAL_LPOCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
85 [B2055_CAL_TS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
86 [B2055_CAL_RCCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
87 [B2055_CAL_RCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
88 [B2055_PADDRV] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
89 [B2055_XOCTL1] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
90 [B2055_XOCTL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
91 [B2055_XOREGUL] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
92 [B2055_XOMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
93 [B2055_PLL_LFC1] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
94 [B2055_PLL_CALVTH] = { .ghz5 = 0x0087, .ghz2 = 0x0087, NOUPLOAD, },
95 [B2055_PLL_LFC2] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, },
96 [B2055_PLL_REF] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
97 [B2055_PLL_LFR1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
98 [B2055_PLL_PFDCP] = { .ghz5 = 0x0018, .ghz2 = 0x0018, UPLOAD, },
99 [B2055_PLL_IDAC_CPOPAMP] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
100 [B2055_PLL_CPREG] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
101 [B2055_PLL_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
102 [B2055_RF_PLLMOD0] = { .ghz5 = 0x009E, .ghz2 = 0x009E, NOUPLOAD, },
103 [B2055_RF_PLLMOD1] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, },
104 [B2055_RF_MMDIDAC1] = { .ghz5 = 0x00C8, .ghz2 = 0x00C8, UPLOAD, },
105 [B2055_RF_MMDIDAC0] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
106 [B2055_RF_MMDSP] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
107 [B2055_VCO_CAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
108 [B2055_VCO_CAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
109 [B2055_VCO_CAL3] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
110 [B2055_VCO_CAL4] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
111 [B2055_VCO_CAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
112 [B2055_VCO_CAL6] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, },
113 [B2055_VCO_CAL7] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, },
114 [B2055_VCO_CAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
115 [B2055_VCO_CAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
116 [B2055_VCO_CAL10] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
117 [B2055_VCO_CAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
118 [B2055_VCO_CAL12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
119 [B2055_VCO_CAL13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
120 [B2055_VCO_CAL14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
121 [B2055_VCO_CAL15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
122 [B2055_VCO_CAL16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
123 [B2055_VCO_KVCO] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
124 [B2055_VCO_CAPTAIL] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
125 [B2055_VCO_IDACVCO] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
126 [B2055_VCO_REG] = { .ghz5 = 0x0084, .ghz2 = 0x0084, UPLOAD, },
127 [B2055_PLL_RFVTH] = { .ghz5 = 0x00C3, .ghz2 = 0x00C3, NOUPLOAD, },
128 [B2055_LGBUF_CENBUF] = { .ghz5 = 0x008F, .ghz2 = 0x008F, NOUPLOAD, },
129 [B2055_LGEN_TUNE1] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
130 [B2055_LGEN_TUNE2] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
131 [B2055_LGEN_IDAC1] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
132 [B2055_LGEN_IDAC2] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
133 [B2055_LGEN_BIASC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
134 [B2055_LGEN_BIASIDAC] = { .ghz5 = 0x00CC, .ghz2 = 0x00CC, NOUPLOAD, },
135 [B2055_LGEN_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
136 [B2055_LGEN_DIV] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
137 [B2055_LGEN_SPARE2] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
138 [B2055_C1_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, },
139 [B2055_C1_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
140 [B2055_C1_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
141 [B2055_C1_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, },
142 [B2055_C1_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
143 [B2055_C1_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
144 [B2055_C1_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, },
145 [B2055_C1_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, },
146 [B2055_C1_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
147 [B2055_C1_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
148 [B2055_C1_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
149 [B2055_C1_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, },
150 [B2055_C1_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
151 [B2055_C1_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
152 [B2055_C1_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
153 [B2055_C1_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
154 [B2055_C1_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
155 [B2055_C1_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
156 [B2055_C1_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
157 [B2055_C1_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, },
158 [B2055_C1_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, },
159 [B2055_C1_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, },
160 [B2055_C1_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, },
161 [B2055_C1_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, },
162 [B2055_C1_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
163 [B2055_C1_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
164 [B2055_C1_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
165 [B2055_C1_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
166 [B2055_C1_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
167 [B2055_C1_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
168 [B2055_C1_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
169 [B2055_C1_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, },
170 [B2055_C1_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
171 [B2055_C1_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, },
172 [B2055_C1_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, },
173 [B2055_C1_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
174 [B2055_C1_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
175 [B2055_C1_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
176 [B2055_C1_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
177 [B2055_C1_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
178 [B2055_C1_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
179 [B2055_C1_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
180 [B2055_C1_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
181 [B2055_C1_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, },
182 [B2055_C1_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
183 [B2055_C1_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, },
184 [B2055_C1_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
185 [B2055_C2_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, },
186 [B2055_C2_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
187 [B2055_C2_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
188 [B2055_C2_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, },
189 [B2055_C2_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
190 [B2055_C2_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
191 [B2055_C2_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, },
192 [B2055_C2_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, },
193 [B2055_C2_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
194 [B2055_C2_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
195 [B2055_C2_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
196 [B2055_C2_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, },
197 [B2055_C2_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
198 [B2055_C2_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
199 [B2055_C2_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
200 [B2055_C2_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
201 [B2055_C2_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
202 [B2055_C2_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
203 [B2055_C2_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
204 [B2055_C2_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, },
205 [B2055_C2_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, },
206 [B2055_C2_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, },
207 [B2055_C2_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, },
208 [B2055_C2_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, },
209 [B2055_C2_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
210 [B2055_C2_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
211 [B2055_C2_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
212 [B2055_C2_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
213 [B2055_C2_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
214 [B2055_C2_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
215 [B2055_C2_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
216 [B2055_C2_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, },
217 [B2055_C2_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
218 [B2055_C2_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, },
219 [B2055_C2_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, },
220 [B2055_C2_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
221 [B2055_C2_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
222 [B2055_C2_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
223 [B2055_C2_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
224 [B2055_C2_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
225 [B2055_C2_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
226 [B2055_C2_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
227 [B2055_C2_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
228 [B2055_C2_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, },
229 [B2055_C2_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
230 [B2055_C2_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, },
231 [B2055_C2_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
232 [B2055_PRG_GCHP21] = { .ghz5 = 0x0071, .ghz2 = 0x0071, NOUPLOAD, },
233 [B2055_PRG_GCHP22] = { .ghz5 = 0x0072, .ghz2 = 0x0072, NOUPLOAD, },
234 [B2055_PRG_GCHP23] = { .ghz5 = 0x0073, .ghz2 = 0x0073, NOUPLOAD, },
235 [B2055_PRG_GCHP24] = { .ghz5 = 0x0074, .ghz2 = 0x0074, NOUPLOAD, },
236 [B2055_PRG_GCHP25] = { .ghz5 = 0x0075, .ghz2 = 0x0075, NOUPLOAD, },
237 [B2055_PRG_GCHP26] = { .ghz5 = 0x0076, .ghz2 = 0x0076, NOUPLOAD, },
238 [B2055_PRG_GCHP27] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
239 [B2055_PRG_GCHP28] = { .ghz5 = 0x0078, .ghz2 = 0x0078, NOUPLOAD, },
240 [B2055_PRG_GCHP29] = { .ghz5 = 0x0079, .ghz2 = 0x0079, NOUPLOAD, },
241 [B2055_PRG_GCHP30] = { .ghz5 = 0x007A, .ghz2 = 0x007A, NOUPLOAD, },
242 [0xC7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
243 [0xC8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
244 [0xC9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
245 [0xCA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
246 [0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
247 [0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
248 [B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
249 [0xCE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
250 [0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
251 [0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
252 [0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
253 [B2055_C1_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
254 [0xD3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
255 [0xD4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
256 [0xD5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
257 [B2055_C1_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
258 [0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
259 [0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
260 [B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
261 [0xDA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
262 [0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
263 [0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
264 [0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
265 [B2055_C2_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
266 [0xDF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
267 [0xE0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
268 [0xE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
269 [B2055_C2_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
270};
271
272
273void b2055_upload_inittab(struct b43_wldev *dev,
274 bool ghz5, bool ignore_uploadflag)
275{
276 const struct b2055_inittab_entry *e;
277 unsigned int i;
278 u16 value;
279
280 for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
281 e = &(b2055_inittab[i]);
282 if (!(e->flags & B2055_INITTAB_ENTRY_OK))
283 continue;
284 if ((e->flags & B2055_INITTAB_UPLOAD) || ignore_uploadflag) {
285 if (ghz5)
286 value = e->ghz5;
287 else
288 value = e->ghz2;
289 b43_radio_write16(dev, i, value);
290 }
291 }
292}
293
294
/*
 * RADIOREGS - expand 22 positional radio register values into the
 * designated initializers of a channel table entry.
 *
 * Used inside b43_nphy_channeltab_entry_rev2 initializers below so
 * each channel line stays a compact list of hex values while the
 * struct fields keep descriptive names.  The argument order (r0..r21)
 * must match the field order here exactly.
 */
#define RADIOREGS(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \
		  r12, r13, r14, r15, r16, r17, r18, r19, r20, r21) \
	.radio_pll_ref			= r0,	\
	.radio_rf_pllmod0		= r1,	\
	.radio_rf_pllmod1		= r2,	\
	.radio_vco_captail		= r3,	\
	.radio_vco_cal1			= r4,	\
	.radio_vco_cal2			= r5,	\
	.radio_pll_lfc1			= r6,	\
	.radio_pll_lfr1			= r7,	\
	.radio_pll_lfc2			= r8,	\
	.radio_lgbuf_cenbuf		= r9,	\
	.radio_lgen_tune1		= r10,	\
	.radio_lgen_tune2		= r11,	\
	.radio_c1_lgbuf_atune		= r12,	\
	.radio_c1_lgbuf_gtune		= r13,	\
	.radio_c1_rx_rfr1		= r14,	\
	.radio_c1_tx_pgapadtn		= r15,	\
	.radio_c1_tx_mxbgtrim		= r16,	\
	.radio_c2_lgbuf_atune		= r17,	\
	.radio_c2_lgbuf_gtune		= r18,	\
	.radio_c2_rx_rfr1		= r19,	\
	.radio_c2_tx_pgapadtn		= r20,	\
	.radio_c2_tx_mxbgtrim		= r21
319
/*
 * PHYREGS - expand six positional PHY bandwidth register values into
 * the nested .phy_regs designated initializers of a channel table
 * entry.  Argument order (r0..r5) maps to phy_bw1a..phy_bw6.
 */
#define PHYREGS(r0, r1, r2, r3, r4, r5) \
	.phy_regs.phy_bw1a	= r0,	\
	.phy_regs.phy_bw2	= r1,	\
	.phy_regs.phy_bw3	= r2,	\
	.phy_regs.phy_bw4	= r3,	\
	.phy_regs.phy_bw5	= r4,	\
	.phy_regs.phy_bw6	= r5
327
328static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
329 { .channel = 184,
330 .freq = 4920, /* MHz */
331 .unk2 = 3280,
332 RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
333 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
334 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
335 PHYREGS(0xB407, 0xB007, 0xAC07, 0x1402, 0x1502, 0x1602),
336 },
337 { .channel = 186,
338 .freq = 4930, /* MHz */
339 .unk2 = 3287,
340 RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
341 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
342 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
343 PHYREGS(0xB807, 0xB407, 0xB007, 0x1302, 0x1402, 0x1502),
344 },
345 { .channel = 188,
346 .freq = 4940, /* MHz */
347 .unk2 = 3293,
348 RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
349 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
350 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
351 PHYREGS(0xBC07, 0xB807, 0xB407, 0x1202, 0x1302, 0x1402),
352 },
353 { .channel = 190,
354 .freq = 4950, /* MHz */
355 .unk2 = 3300,
356 RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
357 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
358 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
359 PHYREGS(0xC007, 0xBC07, 0xB807, 0x1102, 0x1202, 0x1302),
360 },
361 { .channel = 192,
362 .freq = 4960, /* MHz */
363 .unk2 = 3307,
364 RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
365 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
366 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
367 PHYREGS(0xC407, 0xC007, 0xBC07, 0x0F02, 0x1102, 0x1202),
368 },
369 { .channel = 194,
370 .freq = 4970, /* MHz */
371 .unk2 = 3313,
372 RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
373 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
374 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
375 PHYREGS(0xC807, 0xC407, 0xC007, 0x0E02, 0x0F02, 0x1102),
376 },
377 { .channel = 196,
378 .freq = 4980, /* MHz */
379 .unk2 = 3320,
380 RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
381 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
382 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
383 PHYREGS(0xCC07, 0xC807, 0xC407, 0x0D02, 0x0E02, 0x0F02),
384 },
385 { .channel = 198,
386 .freq = 4990, /* MHz */
387 .unk2 = 3327,
388 RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
389 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
390 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
391 PHYREGS(0xD007, 0xCC07, 0xC807, 0x0C02, 0x0D02, 0x0E02),
392 },
393 { .channel = 200,
394 .freq = 5000, /* MHz */
395 .unk2 = 3333,
396 RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
397 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
398 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
399 PHYREGS(0xD407, 0xD007, 0xCC07, 0x0B02, 0x0C02, 0x0D02),
400 },
401 { .channel = 202,
402 .freq = 5010, /* MHz */
403 .unk2 = 3340,
404 RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
405 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
406 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
407 PHYREGS(0xD807, 0xD407, 0xD007, 0x0A02, 0x0B02, 0x0C02),
408 },
409 { .channel = 204,
410 .freq = 5020, /* MHz */
411 .unk2 = 3347,
412 RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
413 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
414 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
415 PHYREGS(0xDC07, 0xD807, 0xD407, 0x0902, 0x0A02, 0x0B02),
416 },
417 { .channel = 206,
418 .freq = 5030, /* MHz */
419 .unk2 = 3353,
420 RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
421 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
422 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
423 PHYREGS(0xE007, 0xDC07, 0xD807, 0x0802, 0x0902, 0x0A02),
424 },
425 { .channel = 208,
426 .freq = 5040, /* MHz */
427 .unk2 = 3360,
428 RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
429 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
430 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
431 PHYREGS(0xE407, 0xE007, 0xDC07, 0x0702, 0x0802, 0x0902),
432 },
433 { .channel = 210,
434 .freq = 5050, /* MHz */
435 .unk2 = 3367,
436 RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
437 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
438 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
439 PHYREGS(0xE807, 0xE407, 0xE007, 0x0602, 0x0702, 0x0802),
440 },
441 { .channel = 212,
442 .freq = 5060, /* MHz */
443 .unk2 = 3373,
444 RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
445 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
446 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
447 PHYREGS(0xEC07, 0xE807, 0xE407, 0x0502, 0x0602, 0x0702),
448 },
449 { .channel = 214,
450 .freq = 5070, /* MHz */
451 .unk2 = 3380,
452 RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
453 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
454 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
455 PHYREGS(0xF007, 0xEC07, 0xE807, 0x0402, 0x0502, 0x0602),
456 },
457 { .channel = 216,
458 .freq = 5080, /* MHz */
459 .unk2 = 3387,
460 RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
461 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
462 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
463 PHYREGS(0xF407, 0xF007, 0xEC07, 0x0302, 0x0402, 0x0502),
464 },
465 { .channel = 218,
466 .freq = 5090, /* MHz */
467 .unk2 = 3393,
468 RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
469 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
470 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
471 PHYREGS(0xF807, 0xF407, 0xF007, 0x0202, 0x0302, 0x0402),
472 },
473 { .channel = 220,
474 .freq = 5100, /* MHz */
475 .unk2 = 3400,
476 RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
477 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
478 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
479 PHYREGS(0xFC07, 0xF807, 0xF407, 0x0102, 0x0202, 0x0302),
480 },
481 { .channel = 222,
482 .freq = 5110, /* MHz */
483 .unk2 = 3407,
484 RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
485 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
486 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
487 PHYREGS(0x0008, 0xFC07, 0xF807, 0x0002, 0x0102, 0x0202),
488 },
489 { .channel = 224,
490 .freq = 5120, /* MHz */
491 .unk2 = 3413,
492 RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
493 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
494 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
495 PHYREGS(0x0408, 0x0008, 0xFC07, 0xFF01, 0x0002, 0x0102),
496 },
497 { .channel = 226,
498 .freq = 5130, /* MHz */
499 .unk2 = 3420,
500 RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
501 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
502 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
503 PHYREGS(0x0808, 0x0408, 0x0008, 0xFE01, 0xFF01, 0x0002),
504 },
505 { .channel = 228,
506 .freq = 5140, /* MHz */
507 .unk2 = 3427,
508 RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A,
509 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E,
510 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B),
511 PHYREGS(0x0C08, 0x0808, 0x0408, 0xFD01, 0xFE01, 0xFF01),
512 },
513 { .channel = 32,
514 .freq = 5160, /* MHz */
515 .unk2 = 3440,
516 RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
517 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
518 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
519 PHYREGS(0x1408, 0x1008, 0x0C08, 0xFB01, 0xFC01, 0xFD01),
520 },
521 { .channel = 34,
522 .freq = 5170, /* MHz */
523 .unk2 = 3447,
524 RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
525 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
526 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
527 PHYREGS(0x1808, 0x1408, 0x1008, 0xFA01, 0xFB01, 0xFC01),
528 },
529 { .channel = 36,
530 .freq = 5180, /* MHz */
531 .unk2 = 3453,
532 RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
533 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
534 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
535 PHYREGS(0x1C08, 0x1808, 0x1408, 0xF901, 0xFA01, 0xFB01),
536 },
537 { .channel = 38,
538 .freq = 5190, /* MHz */
539 .unk2 = 3460,
540 RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
541 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
542 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
543 PHYREGS(0x2008, 0x1C08, 0x1808, 0xF801, 0xF901, 0xFA01),
544 },
545 { .channel = 40,
546 .freq = 5200, /* MHz */
547 .unk2 = 3467,
548 RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
549 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
550 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
551 PHYREGS(0x2408, 0x2008, 0x1C08, 0xF701, 0xF801, 0xF901),
552 },
553 { .channel = 42,
554 .freq = 5210, /* MHz */
555 .unk2 = 3473,
556 RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
557 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
558 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
559 PHYREGS(0x2808, 0x2408, 0x2008, 0xF601, 0xF701, 0xF801),
560 },
561 { .channel = 44,
562 .freq = 5220, /* MHz */
563 .unk2 = 3480,
564 RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
565 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
566 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
567 PHYREGS(0x2C08, 0x2808, 0x2408, 0xF501, 0xF601, 0xF701),
568 },
569 { .channel = 46,
570 .freq = 5230, /* MHz */
571 .unk2 = 3487,
572 RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
573 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
574 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
575 PHYREGS(0x3008, 0x2C08, 0x2808, 0xF401, 0xF501, 0xF601),
576 },
577 { .channel = 48,
578 .freq = 5240, /* MHz */
579 .unk2 = 3493,
580 RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
581 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
582 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
583 PHYREGS(0x3408, 0x3008, 0x2C08, 0xF301, 0xF401, 0xF501),
584 },
585 { .channel = 50,
586 .freq = 5250, /* MHz */
587 .unk2 = 3500,
588 RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
589 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
590 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
591 PHYREGS(0x3808, 0x3408, 0x3008, 0xF201, 0xF301, 0xF401),
592 },
593 { .channel = 52,
594 .freq = 5260, /* MHz */
595 .unk2 = 3507,
596 RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A,
597 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
598 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
599 PHYREGS(0x3C08, 0x3808, 0x3408, 0xF101, 0xF201, 0xF301),
600 },
601 { .channel = 54,
602 .freq = 5270, /* MHz */
603 .unk2 = 3513,
604 RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A,
605 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
606 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
607 PHYREGS(0x4008, 0x3C08, 0x3808, 0xF001, 0xF101, 0xF201),
608 },
609 { .channel = 56,
610 .freq = 5280, /* MHz */
611 .unk2 = 3520,
612 RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A,
613 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
614 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
615 PHYREGS(0x4408, 0x4008, 0x3C08, 0xF001, 0xF001, 0xF101),
616 },
617 { .channel = 58,
618 .freq = 5290, /* MHz */
619 .unk2 = 3527,
620 RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A,
621 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
622 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
623 PHYREGS(0x4808, 0x4408, 0x4008, 0xEF01, 0xF001, 0xF001),
624 },
625 { .channel = 60,
626 .freq = 5300, /* MHz */
627 .unk2 = 3533,
628 RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A,
629 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
630 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
631 PHYREGS(0x4C08, 0x4808, 0x4408, 0xEE01, 0xEF01, 0xF001),
632 },
633 { .channel = 62,
634 .freq = 5310, /* MHz */
635 .unk2 = 3540,
636 RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A,
637 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
638 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
639 PHYREGS(0x5008, 0x4C08, 0x4808, 0xED01, 0xEE01, 0xEF01),
640 },
641 { .channel = 64,
642 .freq = 5320, /* MHz */
643 .unk2 = 3547,
644 RADIOREGS(0x71, 0x02, 0x14, 0x09, 0x83, 0x01, 0x04, 0x0A,
645 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
646 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
647 PHYREGS(0x5408, 0x5008, 0x4C08, 0xEC01, 0xED01, 0xEE01),
648 },
649 { .channel = 66,
650 .freq = 5330, /* MHz */
651 .unk2 = 3553,
652 RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A,
653 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
654 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
655 PHYREGS(0x5808, 0x5408, 0x5008, 0xEB01, 0xEC01, 0xED01),
656 },
657 { .channel = 68,
658 .freq = 5340, /* MHz */
659 .unk2 = 3560,
660 RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A,
661 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
662 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
663 PHYREGS(0x5C08, 0x5808, 0x5408, 0xEA01, 0xEB01, 0xEC01),
664 },
665 { .channel = 70,
666 .freq = 5350, /* MHz */
667 .unk2 = 3567,
668 RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A,
669 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
670 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
671 PHYREGS(0x6008, 0x5C08, 0x5808, 0xE901, 0xEA01, 0xEB01),
672 },
673 { .channel = 72,
674 .freq = 5360, /* MHz */
675 .unk2 = 3573,
676 RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A,
677 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
678 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
679 PHYREGS(0x6408, 0x6008, 0x5C08, 0xE801, 0xE901, 0xEA01),
680 },
681 { .channel = 74,
682 .freq = 5370, /* MHz */
683 .unk2 = 3580,
684 RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A,
685 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
686 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
687 PHYREGS(0x6808, 0x6408, 0x6008, 0xE701, 0xE801, 0xE901),
688 },
689 { .channel = 76,
690 .freq = 5380, /* MHz */
691 .unk2 = 3587,
692 RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A,
693 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
694 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
695 PHYREGS(0x6C08, 0x6808, 0x6408, 0xE601, 0xE701, 0xE801),
696 },
697 { .channel = 78,
698 .freq = 5390, /* MHz */
699 .unk2 = 3593,
700 RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A,
701 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
702 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
703 PHYREGS(0x7008, 0x6C08, 0x6808, 0xE501, 0xE601, 0xE701),
704 },
705 { .channel = 80,
706 .freq = 5400, /* MHz */
707 .unk2 = 3600,
708 RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A,
709 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
710 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
711 PHYREGS(0x7408, 0x7008, 0x6C08, 0xE501, 0xE501, 0xE601),
712 },
713 { .channel = 82,
714 .freq = 5410, /* MHz */
715 .unk2 = 3607,
716 RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A,
717 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
718 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
719 PHYREGS(0x7808, 0x7408, 0x7008, 0xE401, 0xE501, 0xE501),
720 },
721 { .channel = 84,
722 .freq = 5420, /* MHz */
723 .unk2 = 3613,
724 RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A,
725 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
726 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
727 PHYREGS(0x7C08, 0x7808, 0x7408, 0xE301, 0xE401, 0xE501),
728 },
729 { .channel = 86,
730 .freq = 5430, /* MHz */
731 .unk2 = 3620,
732 RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A,
733 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
734 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
735 PHYREGS(0x8008, 0x7C08, 0x7808, 0xE201, 0xE301, 0xE401),
736 },
737 { .channel = 88,
738 .freq = 5440, /* MHz */
739 .unk2 = 3627,
740 RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A,
741 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
742 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
743 PHYREGS(0x8408, 0x8008, 0x7C08, 0xE101, 0xE201, 0xE301),
744 },
745 { .channel = 90,
746 .freq = 5450, /* MHz */
747 .unk2 = 3633,
748 RADIOREGS(0x71, 0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A,
749 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
750 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
751 PHYREGS(0x8808, 0x8408, 0x8008, 0xE001, 0xE101, 0xE201),
752 },
753 { .channel = 92,
754 .freq = 5460, /* MHz */
755 .unk2 = 3640,
756 RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A,
757 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
758 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
759 PHYREGS(0x8C08, 0x8808, 0x8408, 0xDF01, 0xE001, 0xE101),
760 },
761 { .channel = 94,
762 .freq = 5470, /* MHz */
763 .unk2 = 3647,
764 RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A,
765 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
766 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
767 PHYREGS(0x9008, 0x8C08, 0x8808, 0xDE01, 0xDF01, 0xE001),
768 },
769 { .channel = 96,
770 .freq = 5480, /* MHz */
771 .unk2 = 3653,
772 RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A,
773 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
774 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
775 PHYREGS(0x9408, 0x9008, 0x8C08, 0xDD01, 0xDE01, 0xDF01),
776 },
777 { .channel = 98,
778 .freq = 5490, /* MHz */
779 .unk2 = 3660,
780 RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A,
781 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
782 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
783 PHYREGS(0x9808, 0x9408, 0x9008, 0xDD01, 0xDD01, 0xDE01),
784 },
785 { .channel = 100,
786 .freq = 5500, /* MHz */
787 .unk2 = 3667,
788 RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A,
789 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
790 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
791 PHYREGS(0x9C08, 0x9808, 0x9408, 0xDC01, 0xDD01, 0xDD01),
792 },
793 { .channel = 102,
794 .freq = 5510, /* MHz */
795 .unk2 = 3673,
796 RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A,
797 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
798 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
799 PHYREGS(0xA008, 0x9C08, 0x9808, 0xDB01, 0xDC01, 0xDD01),
800 },
801 { .channel = 104,
802 .freq = 5520, /* MHz */
803 .unk2 = 3680,
804 RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A,
805 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
806 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
807 PHYREGS(0xA408, 0xA008, 0x9C08, 0xDA01, 0xDB01, 0xDC01),
808 },
809 { .channel = 106,
810 .freq = 5530, /* MHz */
811 .unk2 = 3687,
812 RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A,
813 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
814 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
815 PHYREGS(0xA808, 0xA408, 0xA008, 0xD901, 0xDA01, 0xDB01),
816 },
817 { .channel = 108,
818 .freq = 5540, /* MHz */
819 .unk2 = 3693,
820 RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A,
821 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
822 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
823 PHYREGS(0xAC08, 0xA808, 0xA408, 0xD801, 0xD901, 0xDA01),
824 },
825 { .channel = 110,
826 .freq = 5550, /* MHz */
827 .unk2 = 3700,
828 RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A,
829 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
830 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
831 PHYREGS(0xB008, 0xAC08, 0xA808, 0xD701, 0xD801, 0xD901),
832 },
833 { .channel = 112,
834 .freq = 5560, /* MHz */
835 .unk2 = 3707,
836 RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A,
837 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
838 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
839 PHYREGS(0xB408, 0xB008, 0xAC08, 0xD701, 0xD701, 0xD801),
840 },
841 { .channel = 114,
842 .freq = 5570, /* MHz */
843 .unk2 = 3713,
844 RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A,
845 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
846 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
847 PHYREGS(0xB808, 0xB408, 0xB008, 0xD601, 0xD701, 0xD701),
848 },
849 { .channel = 116,
850 .freq = 5580, /* MHz */
851 .unk2 = 3720,
852 RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A,
853 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
854 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
855 PHYREGS(0xBC08, 0xB808, 0xB408, 0xD501, 0xD601, 0xD701),
856 },
857 { .channel = 118,
858 .freq = 5590, /* MHz */
859 .unk2 = 3727,
860 RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A,
861 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
862 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
863 PHYREGS(0xC008, 0xBC08, 0xB808, 0xD401, 0xD501, 0xD601),
864 },
865 { .channel = 120,
866 .freq = 5600, /* MHz */
867 .unk2 = 3733,
868 RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A,
869 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
870 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
871 PHYREGS(0xC408, 0xC008, 0xBC08, 0xD301, 0xD401, 0xD501),
872 },
873 { .channel = 122,
874 .freq = 5610, /* MHz */
875 .unk2 = 3740,
876 RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A,
877 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
878 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
879 PHYREGS(0xC808, 0xC408, 0xC008, 0xD201, 0xD301, 0xD401),
880 },
881 { .channel = 124,
882 .freq = 5620, /* MHz */
883 .unk2 = 3747,
884 RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A,
885 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
886 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
887 PHYREGS(0xCC08, 0xC808, 0xC408, 0xD201, 0xD201, 0xD301),
888 },
889 { .channel = 126,
890 .freq = 5630, /* MHz */
891 .unk2 = 3753,
892 RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A,
893 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
894 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
895 PHYREGS(0xD008, 0xCC08, 0xC808, 0xD101, 0xD201, 0xD201),
896 },
897 { .channel = 128,
898 .freq = 5640, /* MHz */
899 .unk2 = 3760,
900 RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A,
901 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
902 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
903 PHYREGS(0xD408, 0xD008, 0xCC08, 0xD001, 0xD101, 0xD201),
904 },
905 { .channel = 130,
906 .freq = 5650, /* MHz */
907 .unk2 = 3767,
908 RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A,
909 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
910 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
911 PHYREGS(0xD808, 0xD408, 0xD008, 0xCF01, 0xD001, 0xD101),
912 },
913 { .channel = 132,
914 .freq = 5660, /* MHz */
915 .unk2 = 3773,
916 RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A,
917 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
918 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
919 PHYREGS(0xDC08, 0xD808, 0xD408, 0xCE01, 0xCF01, 0xD001),
920 },
921 { .channel = 134,
922 .freq = 5670, /* MHz */
923 .unk2 = 3780,
924 RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A,
925 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
926 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
927 PHYREGS(0xE008, 0xDC08, 0xD808, 0xCE01, 0xCE01, 0xCF01),
928 },
929 { .channel = 136,
930 .freq = 5680, /* MHz */
931 .unk2 = 3787,
932 RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A,
933 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
934 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
935 PHYREGS(0xE408, 0xE008, 0xDC08, 0xCD01, 0xCE01, 0xCE01),
936 },
937 { .channel = 138,
938 .freq = 5690, /* MHz */
939 .unk2 = 3793,
940 RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A,
941 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
942 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
943 PHYREGS(0xE808, 0xE408, 0xE008, 0xCC01, 0xCD01, 0xCE01),
944 },
945 { .channel = 140,
946 .freq = 5700, /* MHz */
947 .unk2 = 3800,
948 RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A,
949 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
950 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
951 PHYREGS(0xEC08, 0xE808, 0xE408, 0xCB01, 0xCC01, 0xCD01),
952 },
953 { .channel = 142,
954 .freq = 5710, /* MHz */
955 .unk2 = 3807,
956 RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A,
957 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
958 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
959 PHYREGS(0xF008, 0xEC08, 0xE808, 0xCA01, 0xCB01, 0xCC01),
960 },
961 { .channel = 144,
962 .freq = 5720, /* MHz */
963 .unk2 = 3813,
964 RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A,
965 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
966 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
967 PHYREGS(0xF408, 0xF008, 0xEC08, 0xC901, 0xCA01, 0xCB01),
968 },
969 { .channel = 145,
970 .freq = 5725, /* MHz */
971 .unk2 = 3817,
972 RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14,
973 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
974 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
975 PHYREGS(0xF608, 0xF208, 0xEE08, 0xC901, 0xCA01, 0xCB01),
976 },
977 { .channel = 146,
978 .freq = 5730, /* MHz */
979 .unk2 = 3820,
980 RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A,
981 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
982 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
983 PHYREGS(0xF808, 0xF408, 0xF008, 0xC901, 0xC901, 0xCA01),
984 },
985 { .channel = 147,
986 .freq = 5735, /* MHz */
987 .unk2 = 3823,
988 RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14,
989 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
990 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
991 PHYREGS(0xFA08, 0xF608, 0xF208, 0xC801, 0xC901, 0xCA01),
992 },
993 { .channel = 148,
994 .freq = 5740, /* MHz */
995 .unk2 = 3827,
996 RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A,
997 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
998 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
999 PHYREGS(0xFC08, 0xF808, 0xF408, 0xC801, 0xC901, 0xC901),
1000 },
1001 { .channel = 149,
1002 .freq = 5745, /* MHz */
1003 .unk2 = 3830,
1004 RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14,
1005 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1006 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1007 PHYREGS(0xFE08, 0xFA08, 0xF608, 0xC801, 0xC801, 0xC901),
1008 },
1009 { .channel = 150,
1010 .freq = 5750, /* MHz */
1011 .unk2 = 3833,
1012 RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1013 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1014 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1015 PHYREGS(0x0009, 0xFC08, 0xF808, 0xC701, 0xC801, 0xC901),
1016 },
1017 { .channel = 151,
1018 .freq = 5755, /* MHz */
1019 .unk2 = 3837,
1020 RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14,
1021 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1022 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1023 PHYREGS(0x0209, 0xFE08, 0xFA08, 0xC701, 0xC801, 0xC801),
1024 },
1025 { .channel = 152,
1026 .freq = 5760, /* MHz */
1027 .unk2 = 3840,
1028 RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1029 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1030 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1031 PHYREGS(0x0409, 0x0009, 0xFC08, 0xC601, 0xC701, 0xC801),
1032 },
1033 { .channel = 153,
1034 .freq = 5765, /* MHz */
1035 .unk2 = 3843,
1036 RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14,
1037 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1038 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1039 PHYREGS(0x0609, 0x0209, 0xFE08, 0xC601, 0xC701, 0xC801),
1040 },
1041 { .channel = 154,
1042 .freq = 5770, /* MHz */
1043 .unk2 = 3847,
1044 RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1045 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1046 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1047 PHYREGS(0x0809, 0x0409, 0x0009, 0xC601, 0xC601, 0xC701),
1048 },
1049 { .channel = 155,
1050 .freq = 5775, /* MHz */
1051 .unk2 = 3850,
1052 RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14,
1053 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1054 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1055 PHYREGS(0x0A09, 0x0609, 0x0209, 0xC501, 0xC601, 0xC701),
1056 },
1057 { .channel = 156,
1058 .freq = 5780, /* MHz */
1059 .unk2 = 3853,
1060 RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1061 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1062 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1063 PHYREGS(0x0C09, 0x0809, 0x0409, 0xC501, 0xC601, 0xC601),
1064 },
1065 { .channel = 157,
1066 .freq = 5785, /* MHz */
1067 .unk2 = 3857,
1068 RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14,
1069 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1070 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1071 PHYREGS(0x0E09, 0x0A09, 0x0609, 0xC401, 0xC501, 0xC601),
1072 },
1073 { .channel = 158,
1074 .freq = 5790, /* MHz */
1075 .unk2 = 3860,
1076 RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1077 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1078 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1079 PHYREGS(0x1009, 0x0C09, 0x0809, 0xC401, 0xC501, 0xC601),
1080 },
1081 { .channel = 159,
1082 .freq = 5795, /* MHz */
1083 .unk2 = 3863,
1084 RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14,
1085 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1086 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1087 PHYREGS(0x1209, 0x0E09, 0x0A09, 0xC401, 0xC401, 0xC501),
1088 },
1089 { .channel = 160,
1090 .freq = 5800, /* MHz */
1091 .unk2 = 3867,
1092 RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1093 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1094 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1095 PHYREGS(0x1409, 0x1009, 0x0C09, 0xC301, 0xC401, 0xC501),
1096 },
1097 { .channel = 161,
1098 .freq = 5805, /* MHz */
1099 .unk2 = 3870,
1100 RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14,
1101 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1102 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1103 PHYREGS(0x1609, 0x1209, 0x0E09, 0xC301, 0xC401, 0xC401),
1104 },
1105 { .channel = 162,
1106 .freq = 5810, /* MHz */
1107 .unk2 = 3873,
1108 RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1109 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1110 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1111 PHYREGS(0x1809, 0x1409, 0x1009, 0xC201, 0xC301, 0xC401),
1112 },
1113 { .channel = 163,
1114 .freq = 5815, /* MHz */
1115 .unk2 = 3877,
1116 RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14,
1117 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1118 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1119 PHYREGS(0x1A09, 0x1609, 0x1209, 0xC201, 0xC301, 0xC401),
1120 },
1121 { .channel = 164,
1122 .freq = 5820, /* MHz */
1123 .unk2 = 3880,
1124 RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1125 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1126 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1127 PHYREGS(0x1C09, 0x1809, 0x1409, 0xC201, 0xC201, 0xC301),
1128 },
1129 { .channel = 165,
1130 .freq = 5825, /* MHz */
1131 .unk2 = 3883,
1132 RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14,
1133 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1134 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1135 PHYREGS(0x1E09, 0x1A09, 0x1609, 0xC101, 0xC201, 0xC301),
1136 },
1137 { .channel = 166,
1138 .freq = 5830, /* MHz */
1139 .unk2 = 3887,
1140 RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1141 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1142 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1143 PHYREGS(0x2009, 0x1C09, 0x1809, 0xC101, 0xC201, 0xC201),
1144 },
1145 { .channel = 168,
1146 .freq = 5840, /* MHz */
1147 .unk2 = 3893,
1148 RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1149 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1150 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1151 PHYREGS(0x2409, 0x2009, 0x1C09, 0xC001, 0xC101, 0xC201),
1152 },
1153 { .channel = 170,
1154 .freq = 5850, /* MHz */
1155 .unk2 = 3900,
1156 RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A,
1157 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1158 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1159 PHYREGS(0x2809, 0x2409, 0x2009, 0xBF01, 0xC001, 0xC101),
1160 },
1161 { .channel = 172,
1162 .freq = 5860, /* MHz */
1163 .unk2 = 3907,
1164 RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A,
1165 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1166 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1167 PHYREGS(0x2C09, 0x2809, 0x2409, 0xBF01, 0xBF01, 0xC001),
1168 },
1169 { .channel = 174,
1170 .freq = 5870, /* MHz */
1171 .unk2 = 3913,
1172 RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A,
1173 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1174 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1175 PHYREGS(0x3009, 0x2C09, 0x2809, 0xBE01, 0xBF01, 0xBF01),
1176 },
1177 { .channel = 176,
1178 .freq = 5880, /* MHz */
1179 .unk2 = 3920,
1180 RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A,
1181 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1182 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1183 PHYREGS(0x3409, 0x3009, 0x2C09, 0xBD01, 0xBE01, 0xBF01),
1184 },
1185 { .channel = 178,
1186 .freq = 5890, /* MHz */
1187 .unk2 = 3927,
1188 RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1189 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1190 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1191 PHYREGS(0x3809, 0x3409, 0x3009, 0xBC01, 0xBD01, 0xBE01),
1192 },
1193 { .channel = 180,
1194 .freq = 5900, /* MHz */
1195 .unk2 = 3933,
1196 RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A,
1197 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1198 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1199 PHYREGS(0x3C09, 0x3809, 0x3409, 0xBC01, 0xBC01, 0xBD01),
1200 },
1201 { .channel = 182,
1202 .freq = 5910, /* MHz */
1203 .unk2 = 3940,
1204 RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1205 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1206 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1207 PHYREGS(0x4009, 0x3C09, 0x3809, 0xBB01, 0xBC01, 0xBC01),
1208 },
1209 { .channel = 1,
1210 .freq = 2412, /* MHz */
1211 .unk2 = 3216,
1212 RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15,
1213 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C,
1214 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80),
1215 PHYREGS(0xC903, 0xC503, 0xC103, 0x3A04, 0x3F04, 0x4304),
1216 },
1217 { .channel = 2,
1218 .freq = 2417, /* MHz */
1219 .unk2 = 3223,
1220 RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15,
1221 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B,
1222 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80),
1223 PHYREGS(0xCB03, 0xC703, 0xC303, 0x3804, 0x3D04, 0x4104),
1224 },
1225 { .channel = 3,
1226 .freq = 2422, /* MHz */
1227 .unk2 = 3229,
1228 RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15,
1229 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1230 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1231 PHYREGS(0xCD03, 0xC903, 0xC503, 0x3604, 0x3A04, 0x3F04),
1232 },
1233 { .channel = 4,
1234 .freq = 2427, /* MHz */
1235 .unk2 = 3236,
1236 RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15,
1237 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1238 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1239 PHYREGS(0xCF03, 0xCB03, 0xC703, 0x3404, 0x3804, 0x3D04),
1240 },
1241 { .channel = 5,
1242 .freq = 2432, /* MHz */
1243 .unk2 = 3243,
1244 RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15,
1245 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09,
1246 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80),
1247 PHYREGS(0xD103, 0xCD03, 0xC903, 0x3104, 0x3604, 0x3A04),
1248 },
1249 { .channel = 6,
1250 .freq = 2437, /* MHz */
1251 .unk2 = 3249,
1252 RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15,
1253 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08,
1254 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80),
1255 PHYREGS(0xD303, 0xCF03, 0xCB03, 0x2F04, 0x3404, 0x3804),
1256 },
1257 { .channel = 7,
1258 .freq = 2442, /* MHz */
1259 .unk2 = 3256,
1260 RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15,
1261 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07,
1262 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80),
1263 PHYREGS(0xD503, 0xD103, 0xCD03, 0x2D04, 0x3104, 0x3604),
1264 },
1265 { .channel = 8,
1266 .freq = 2447, /* MHz */
1267 .unk2 = 3263,
1268 RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15,
1269 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06,
1270 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80),
1271 PHYREGS(0xD703, 0xD303, 0xCF03, 0x2B04, 0x2F04, 0x3404),
1272 },
1273 { .channel = 9,
1274 .freq = 2452, /* MHz */
1275 .unk2 = 3269,
1276 RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15,
1277 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06,
1278 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80),
1279 PHYREGS(0xD903, 0xD503, 0xD103, 0x2904, 0x2D04, 0x3104),
1280 },
1281 { .channel = 10,
1282 .freq = 2457, /* MHz */
1283 .unk2 = 3276,
1284 RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15,
1285 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05,
1286 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80),
1287 PHYREGS(0xDB03, 0xD703, 0xD303, 0x2704, 0x2B04, 0x2F04),
1288 },
1289 { .channel = 11,
1290 .freq = 2462, /* MHz */
1291 .unk2 = 3283,
1292 RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15,
1293 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04,
1294 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80),
1295 PHYREGS(0xDD03, 0xD903, 0xD503, 0x2404, 0x2904, 0x2D04),
1296 },
1297 { .channel = 12,
1298 .freq = 2467, /* MHz */
1299 .unk2 = 3289,
1300 RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15,
1301 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03,
1302 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80),
1303 PHYREGS(0xDF03, 0xDB03, 0xD703, 0x2204, 0x2704, 0x2B04),
1304 },
1305 { .channel = 13,
1306 .freq = 2472, /* MHz */
1307 .unk2 = 3296,
1308 RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15,
1309 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03,
1310 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80),
1311 PHYREGS(0xE103, 0xDD03, 0xD903, 0x2004, 0x2404, 0x2904),
1312 },
1313 { .channel = 14,
1314 .freq = 2484, /* MHz */
1315 .unk2 = 3312,
1316 RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15,
1317 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01,
1318 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80),
1319 PHYREGS(0xE603, 0xE203, 0xDE03, 0x1B04, 0x1F04, 0x2404),
1320 },
1321};
1322
1323const struct b43_nphy_channeltab_entry_rev2 *
1324b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
1325{
1326 const struct b43_nphy_channeltab_entry_rev2 *e;
1327 unsigned int i;
1328
1329 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {
1330 e = &(b43_nphy_channeltab[i]);
1331 if (e->channel == channel)
1332 return e;
1333 }
1334
1335 return NULL;
1336}
1337
1338
1339static const u8 b43_ntab_adjustpower0[] = { 30static const u8 b43_ntab_adjustpower0[] = {
1340 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 31 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
1341 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 32 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 8fc1da9f8fe5..4ec593ba3eef 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6
7struct b43_phy_n_sfo_cfg { 6struct b43_phy_n_sfo_cfg {
8 u16 phy_bw1a; 7 u16 phy_bw1a;
9 u16 phy_bw2; 8 u16 phy_bw2;
@@ -13,52 +12,6 @@ struct b43_phy_n_sfo_cfg {
13 u16 phy_bw6; 12 u16 phy_bw6;
14}; 13};
15 14
16struct b43_nphy_channeltab_entry_rev2 {
17 /* The channel number */
18 u8 channel;
19 /* The channel frequency in MHz */
20 u16 freq;
21 /* An unknown value */
22 u16 unk2;
23 /* Radio register values on channelswitch */
24 u8 radio_pll_ref;
25 u8 radio_rf_pllmod0;
26 u8 radio_rf_pllmod1;
27 u8 radio_vco_captail;
28 u8 radio_vco_cal1;
29 u8 radio_vco_cal2;
30 u8 radio_pll_lfc1;
31 u8 radio_pll_lfr1;
32 u8 radio_pll_lfc2;
33 u8 radio_lgbuf_cenbuf;
34 u8 radio_lgen_tune1;
35 u8 radio_lgen_tune2;
36 u8 radio_c1_lgbuf_atune;
37 u8 radio_c1_lgbuf_gtune;
38 u8 radio_c1_rx_rfr1;
39 u8 radio_c1_tx_pgapadtn;
40 u8 radio_c1_tx_mxbgtrim;
41 u8 radio_c2_lgbuf_atune;
42 u8 radio_c2_lgbuf_gtune;
43 u8 radio_c2_rx_rfr1;
44 u8 radio_c2_tx_pgapadtn;
45 u8 radio_c2_tx_mxbgtrim;
46 /* PHY register values on channelswitch */
47 struct b43_phy_n_sfo_cfg phy_regs;
48};
49
50struct b43_nphy_channeltab_entry_rev3 {
51 /* The channel number */
52 u8 channel;
53 /* The channel frequency in MHz */
54 u16 freq;
55 /* Radio register values on channelswitch */
56 /* TODO */
57 /* PHY register values on channelswitch */
58 struct b43_phy_n_sfo_cfg phy_regs;
59};
60
61
62struct b43_wldev; 15struct b43_wldev;
63 16
64struct nphy_txiqcal_ladder { 17struct nphy_txiqcal_ladder {
@@ -82,18 +35,12 @@ struct nphy_rf_control_override_rev3 {
82 u8 val_addr1; 35 u8 val_addr1;
83}; 36};
84 37
85/* Upload the default register value table. 38/* Get the NPHY Channel Switch Table entry for a channel.
86 * If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
87 * table is uploaded. If "ignore_uploadflag" is true, we upload any value
88 * and ignore the "UPLOAD" flag. */
89void b2055_upload_inittab(struct b43_wldev *dev,
90 bool ghz5, bool ignore_uploadflag);
91
92
93/* Get the NPHY Channel Switch Table entry for a channel number.
94 * Returns NULL on failure to find an entry. */ 39 * Returns NULL on failure to find an entry. */
95const struct b43_nphy_channeltab_entry_rev2 * 40const struct b43_nphy_channeltab_entry_rev2 *
96b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel); 41b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
42const struct b43_nphy_channeltab_entry_rev3 *
43b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq);
97 44
98 45
99/* The N-PHY tables. */ 46/* The N-PHY tables. */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0f2508384c75..8d6ed5f6f46f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11470,6 +11470,10 @@ static int ipw_net_init(struct net_device *dev)
11470 bg_band->channels = kcalloc(geo->bg_channels, 11470 bg_band->channels = kcalloc(geo->bg_channels,
11471 sizeof(struct ieee80211_channel), 11471 sizeof(struct ieee80211_channel),
11472 GFP_KERNEL); 11472 GFP_KERNEL);
11473 if (!bg_band->channels) {
11474 rc = -ENOMEM;
11475 goto out;
11476 }
11473 /* translate geo->bg to bg_band.channels */ 11477 /* translate geo->bg to bg_band.channels */
11474 for (i = 0; i < geo->bg_channels; i++) { 11478 for (i = 0; i < geo->bg_channels; i++) {
11475 bg_band->channels[i].band = IEEE80211_BAND_2GHZ; 11479 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
@@ -11505,6 +11509,10 @@ static int ipw_net_init(struct net_device *dev)
11505 a_band->channels = kcalloc(geo->a_channels, 11509 a_band->channels = kcalloc(geo->a_channels,
11506 sizeof(struct ieee80211_channel), 11510 sizeof(struct ieee80211_channel),
11507 GFP_KERNEL); 11511 GFP_KERNEL);
11512 if (!a_band->channels) {
11513 rc = -ENOMEM;
11514 goto out;
11515 }
11508 /* translate geo->bg to a_band.channels */ 11516 /* translate geo->bg to a_band.channels */
11509 for (i = 0; i < geo->a_channels; i++) { 11517 for (i = 0; i < geo->a_channels; i++) {
11510 a_band->channels[i].band = IEEE80211_BAND_2GHZ; 11518 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 493163925a45..63edbe2e557f 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -12,7 +12,7 @@ obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
15iwlagn-objs += iwl-agn-tt.o 15iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
16iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o 16iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
17 17
18iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 18iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 674fb93ae17f..db540910b110 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -50,14 +50,20 @@
50 50
51/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
52#define IWL1000_UCODE_API_MAX 3 52#define IWL1000_UCODE_API_MAX 3
53#define IWL100_UCODE_API_MAX 5
53 54
54/* Lowest firmware API version supported */ 55/* Lowest firmware API version supported */
55#define IWL1000_UCODE_API_MIN 1 56#define IWL1000_UCODE_API_MIN 1
57#define IWL100_UCODE_API_MIN 5
56 58
57#define IWL1000_FW_PRE "iwlwifi-1000-" 59#define IWL1000_FW_PRE "iwlwifi-1000-"
58#define _IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode" 60#define _IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode"
59#define IWL1000_MODULE_FIRMWARE(api) _IWL1000_MODULE_FIRMWARE(api) 61#define IWL1000_MODULE_FIRMWARE(api) _IWL1000_MODULE_FIRMWARE(api)
60 62
63#define IWL100_FW_PRE "iwlwifi-100-"
64#define _IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
65#define IWL100_MODULE_FIRMWARE(api) _IWL100_MODULE_FIRMWARE(api)
66
61 67
62/* 68/*
63 * For 1000, use advance thermal throttling critical temperature threshold, 69 * For 1000, use advance thermal throttling critical temperature threshold,
@@ -120,13 +126,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
120{ 126{
121 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 127 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
122 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 128 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
123 priv->cfg->num_of_queues = 129 priv->cfg->base_params->num_of_queues =
124 priv->cfg->mod_params->num_of_queues; 130 priv->cfg->mod_params->num_of_queues;
125 131
126 priv->hw_params.max_txq_num = priv->cfg->num_of_queues; 132 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
127 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 133 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
128 priv->hw_params.scd_bc_tbls_size = 134 priv->hw_params.scd_bc_tbls_size =
129 priv->cfg->num_of_queues * 135 priv->cfg->base_params->num_of_queues *
130 sizeof(struct iwlagn_scd_bc_tbl); 136 sizeof(struct iwlagn_scd_bc_tbl);
131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 137 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 138 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
@@ -145,8 +151,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
145 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 151 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
146 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 152 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
147 153
148 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 154 iwl1000_set_ct_threshold(priv);
149 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
150 155
151 /* Set initial sensitivity parameters */ 156 /* Set initial sensitivity parameters */
152 /* Set initial calibration set */ 157 /* Set initial calibration set */
@@ -189,9 +194,7 @@ static struct iwl_lib_ops iwl1000_lib = {
189 .update_chain_flags = iwl_update_chain_flags, 194 .update_chain_flags = iwl_update_chain_flags,
190 .apm_ops = { 195 .apm_ops = {
191 .init = iwl_apm_init, 196 .init = iwl_apm_init,
192 .stop = iwl_apm_stop,
193 .config = iwl1000_nic_config, 197 .config = iwl1000_nic_config,
194 .set_pwr_src = iwl_set_pwr_src,
195 }, 198 },
196 .eeprom_ops = { 199 .eeprom_ops = {
197 .regulatory_bands = { 200 .regulatory_bands = {
@@ -203,7 +206,6 @@ static struct iwl_lib_ops iwl1000_lib = {
203 EEPROM_REG_BAND_24_HT40_CHANNELS, 206 EEPROM_REG_BAND_24_HT40_CHANNELS,
204 EEPROM_REG_BAND_52_HT40_CHANNELS 207 EEPROM_REG_BAND_52_HT40_CHANNELS
205 }, 208 },
206 .verify_signature = iwlcore_eeprom_verify_signature,
207 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 209 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
208 .release_semaphore = iwlcore_eeprom_release_semaphore, 210 .release_semaphore = iwlcore_eeprom_release_semaphore,
209 .calib_version = iwlagn_eeprom_calib_version, 211 .calib_version = iwlagn_eeprom_calib_version,
@@ -214,7 +216,6 @@ static struct iwl_lib_ops iwl1000_lib = {
214 .config_ap = iwl_config_ap, 216 .config_ap = iwl_config_ap,
215 .temp_ops = { 217 .temp_ops = {
216 .temperature = iwlagn_temperature, 218 .temperature = iwlagn_temperature,
217 .set_ct_kill = iwl1000_set_ct_threshold,
218 }, 219 },
219 .manage_ibss_station = iwlagn_manage_ibss_station, 220 .manage_ibss_station = iwlagn_manage_ibss_station,
220 .update_bcast_stations = iwl_update_bcast_stations, 221 .update_bcast_stations = iwl_update_bcast_stations,
@@ -223,6 +224,7 @@ static struct iwl_lib_ops iwl1000_lib = {
223 .tx_stats_read = iwl_ucode_tx_stats_read, 224 .tx_stats_read = iwl_ucode_tx_stats_read,
224 .general_stats_read = iwl_ucode_general_stats_read, 225 .general_stats_read = iwl_ucode_general_stats_read,
225 .bt_stats_read = iwl_ucode_bt_stats_read, 226 .bt_stats_read = iwl_ucode_bt_stats_read,
227 .reply_tx_error = iwl_reply_tx_error_read,
226 }, 228 },
227 .recover_from_tx_stall = iwl_bg_monitor_recover, 229 .recover_from_tx_stall = iwl_bg_monitor_recover,
228 .check_plcp_health = iwl_good_plcp_health, 230 .check_plcp_health = iwl_good_plcp_health,
@@ -243,29 +245,16 @@ static const struct iwl_ops iwl1000_ops = {
243 .led = &iwlagn_led_ops, 245 .led = &iwlagn_led_ops,
244}; 246};
245 247
246struct iwl_cfg iwl1000_bgn_cfg = { 248static struct iwl_base_params iwl1000_base_params = {
247 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
248 .fw_name_pre = IWL1000_FW_PRE,
249 .ucode_api_max = IWL1000_UCODE_API_MAX,
250 .ucode_api_min = IWL1000_UCODE_API_MIN,
251 .sku = IWL_SKU_G|IWL_SKU_N,
252 .ops = &iwl1000_ops,
253 .eeprom_size = OTP_LOW_IMAGE_SIZE,
254 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
255 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
256 .num_of_queues = IWLAGN_NUM_QUEUES, 249 .num_of_queues = IWLAGN_NUM_QUEUES,
257 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 250 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
258 .mod_params = &iwlagn_mod_params, 251 .eeprom_size = OTP_LOW_IMAGE_SIZE,
259 .valid_tx_ant = ANT_A,
260 .valid_rx_ant = ANT_AB,
261 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 252 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
262 .set_l0s = true, 253 .set_l0s = true,
263 .use_bsm = false, 254 .use_bsm = false,
264 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 255 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
265 .shadow_ram_support = false, 256 .shadow_ram_support = false,
266 .ht_greenfield_support = true,
267 .led_compensation = 51, 257 .led_compensation = 51,
268 .use_rts_for_aggregation = true, /* use rts/cts protection */
269 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 258 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
270 .support_ct_kill_exit = true, 259 .support_ct_kill_exit = true,
271 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 260 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
@@ -276,6 +265,26 @@ struct iwl_cfg iwl1000_bgn_cfg = {
276 .sensitivity_calib_by_driver = true, 265 .sensitivity_calib_by_driver = true,
277 .chain_noise_calib_by_driver = true, 266 .chain_noise_calib_by_driver = true,
278}; 267};
268static struct iwl_ht_params iwl1000_ht_params = {
269 .ht_greenfield_support = true,
270 .use_rts_for_aggregation = true, /* use rts/cts protection */
271};
272
273struct iwl_cfg iwl1000_bgn_cfg = {
274 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
275 .fw_name_pre = IWL1000_FW_PRE,
276 .ucode_api_max = IWL1000_UCODE_API_MAX,
277 .ucode_api_min = IWL1000_UCODE_API_MIN,
278 .sku = IWL_SKU_G|IWL_SKU_N,
279 .valid_tx_ant = ANT_A,
280 .valid_rx_ant = ANT_AB,
281 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
282 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
283 .ops = &iwl1000_ops,
284 .mod_params = &iwlagn_mod_params,
285 .base_params = &iwl1000_base_params,
286 .ht_params = &iwl1000_ht_params,
287};
279 288
280struct iwl_cfg iwl1000_bg_cfg = { 289struct iwl_cfg iwl1000_bg_cfg = {
281 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG", 290 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
@@ -283,30 +292,45 @@ struct iwl_cfg iwl1000_bg_cfg = {
283 .ucode_api_max = IWL1000_UCODE_API_MAX, 292 .ucode_api_max = IWL1000_UCODE_API_MAX,
284 .ucode_api_min = IWL1000_UCODE_API_MIN, 293 .ucode_api_min = IWL1000_UCODE_API_MIN,
285 .sku = IWL_SKU_G, 294 .sku = IWL_SKU_G,
295 .valid_tx_ant = ANT_A,
296 .valid_rx_ant = ANT_AB,
297 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
298 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
286 .ops = &iwl1000_ops, 299 .ops = &iwl1000_ops,
287 .eeprom_size = OTP_LOW_IMAGE_SIZE, 300 .mod_params = &iwlagn_mod_params,
301 .base_params = &iwl1000_base_params,
302};
303
304struct iwl_cfg iwl100_bgn_cfg = {
305 .name = "Intel(R) 100 Series 1x1 BGN",
306 .fw_name_pre = IWL100_FW_PRE,
307 .ucode_api_max = IWL100_UCODE_API_MAX,
308 .ucode_api_min = IWL100_UCODE_API_MIN,
309 .sku = IWL_SKU_G|IWL_SKU_N,
310 .valid_tx_ant = ANT_A,
311 .valid_rx_ant = ANT_A,
288 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 312 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
289 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, 313 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
290 .num_of_queues = IWLAGN_NUM_QUEUES, 314 .ops = &iwl1000_ops,
291 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
292 .mod_params = &iwlagn_mod_params, 315 .mod_params = &iwlagn_mod_params,
316 .base_params = &iwl1000_base_params,
317 .ht_params = &iwl1000_ht_params,
318};
319
320struct iwl_cfg iwl100_bg_cfg = {
321 .name = "Intel(R) 100 Series 1x1 BG",
322 .fw_name_pre = IWL100_FW_PRE,
323 .ucode_api_max = IWL100_UCODE_API_MAX,
324 .ucode_api_min = IWL100_UCODE_API_MIN,
325 .sku = IWL_SKU_G,
293 .valid_tx_ant = ANT_A, 326 .valid_tx_ant = ANT_A,
294 .valid_rx_ant = ANT_AB, 327 .valid_rx_ant = ANT_A,
295 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 328 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
296 .set_l0s = true, 329 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
297 .use_bsm = false, 330 .ops = &iwl1000_ops,
298 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 331 .mod_params = &iwlagn_mod_params,
299 .shadow_ram_support = false, 332 .base_params = &iwl1000_base_params,
300 .led_compensation = 51,
301 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
302 .support_ct_kill_exit = true,
303 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
304 .chain_noise_scale = 1000,
305 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
306 .max_event_log_size = 128,
307 .ucode_tracing = true,
308 .sensitivity_calib_by_driver = true,
309 .chain_noise_calib_by_driver = true,
310}; 333};
311 334
312MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 335MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
336MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 5d09686c3389..176e52577673 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -87,6 +87,15 @@ const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
87 IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */ 87 IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
88}; 88};
89 89
90static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
91{
92 u8 rate = iwl3945_rates[rate_index].prev_ieee;
93
94 if (rate == IWL_RATE_INVALID)
95 rate = rate_index;
96 return rate;
97}
98
90/* 1 = enable the iwl3945_disable_events() function */ 99/* 1 = enable the iwl3945_disable_events() function */
91#define IWL_EVT_DISABLE (0) 100#define IWL_EVT_DISABLE (0)
92#define IWL_EVT_DISABLE_SIZE (1532/32) 101#define IWL_EVT_DISABLE_SIZE (1532/32)
@@ -339,7 +348,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
339 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index); 348 IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
340 iwl3945_tx_queue_reclaim(priv, txq_id, index); 349 iwl3945_tx_queue_reclaim(priv, txq_id, index);
341 350
342 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 351 if (status & TX_ABORT_REQUIRED_MSK)
343 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); 352 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
344} 353}
345 354
@@ -406,7 +415,7 @@ static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
406 unsigned int plcp_msec; 415 unsigned int plcp_msec;
407 unsigned long plcp_received_jiffies; 416 unsigned long plcp_received_jiffies;
408 417
409 if (priv->cfg->plcp_delta_threshold == 418 if (priv->cfg->base_params->plcp_delta_threshold ==
410 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { 419 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
411 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); 420 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
412 return rc; 421 return rc;
@@ -432,7 +441,7 @@ static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
432 441
433 if ((combined_plcp_delta > 0) && 442 if ((combined_plcp_delta > 0) &&
434 ((combined_plcp_delta * 100) / plcp_msec) > 443 ((combined_plcp_delta * 100) / plcp_msec) >
435 priv->cfg->plcp_delta_threshold) { 444 priv->cfg->base_params->plcp_delta_threshold) {
436 /* 445 /*
437 * if plcp_err exceed the threshold, the following 446 * if plcp_err exceed the threshold, the following
438 * data is printed in csv format: 447 * data is printed in csv format:
@@ -444,7 +453,7 @@ static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
444 */ 453 */
445 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " 454 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
446 "%u, %d, %u mSecs\n", 455 "%u, %d, %u mSecs\n",
447 priv->cfg->plcp_delta_threshold, 456 priv->cfg->base_params->plcp_delta_threshold,
448 le32_to_cpu(current_stat.rx.ofdm.plcp_err), 457 le32_to_cpu(current_stat.rx.ofdm.plcp_err),
449 combined_plcp_delta, plcp_msec); 458 combined_plcp_delta, plcp_msec);
450 /* 459 /*
@@ -807,9 +816,12 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
807 return sta_id; 816 return sta_id;
808} 817}
809 818
810static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) 819static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
811{ 820{
812 if (src == IWL_PWR_SRC_VAUX) { 821/*
822 * (for documentation purposes)
823 * to set power to V_AUX, do
824
813 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { 825 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
814 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 826 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
815 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 827 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@@ -819,16 +831,14 @@ static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
819 CSR_GPIO_IN_VAL_VAUX_PWR_SRC, 831 CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
820 CSR_GPIO_IN_BIT_AUX_POWER, 5000); 832 CSR_GPIO_IN_BIT_AUX_POWER, 5000);
821 } 833 }
822 } else { 834 */
823 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
824 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
825 ~APMG_PS_CTRL_MSK_PWR_SRC);
826 835
827 iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC, 836 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
828 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */ 837 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
829 } 838 ~APMG_PS_CTRL_MSK_PWR_SRC);
830 839
831 return 0; 840 iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
841 CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
832} 842}
833 843
834static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) 844static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
@@ -1022,9 +1032,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
1022 priv->cfg->ops->lib->apm_ops.init(priv); 1032 priv->cfg->ops->lib->apm_ops.init(priv);
1023 spin_unlock_irqrestore(&priv->lock, flags); 1033 spin_unlock_irqrestore(&priv->lock, flags);
1024 1034
1025 rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 1035 iwl3945_set_pwr_vmain(priv);
1026 if (rc)
1027 return rc;
1028 1036
1029 priv->cfg->ops->lib->apm_ops.config(priv); 1037 priv->cfg->ops->lib->apm_ops.config(priv);
1030 1038
@@ -1763,8 +1771,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1763 * function correctly transitions out of the RXON_ASSOC_MSK state if 1771 * function correctly transitions out of the RXON_ASSOC_MSK state if
1764 * a HW tune is required based on the RXON structure changes. 1772 * a HW tune is required based on the RXON structure changes.
1765 */ 1773 */
1766static int iwl3945_commit_rxon(struct iwl_priv *priv, 1774int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1767 struct iwl_rxon_context *ctx)
1768{ 1775{
1769 /* cast away the const for active_rxon in this function */ 1776 /* cast away the const for active_rxon in this function */
1770 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active; 1777 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
@@ -2300,6 +2307,32 @@ static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
2300 return (u16)sizeof(struct iwl3945_addsta_cmd); 2307 return (u16)sizeof(struct iwl3945_addsta_cmd);
2301} 2308}
2302 2309
2310static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2311 const u8 *addr, u8 *sta_id_r)
2312{
2313 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2314 int ret;
2315 u8 sta_id;
2316 unsigned long flags;
2317
2318 if (sta_id_r)
2319 *sta_id_r = IWL_INVALID_STATION;
2320
2321 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2322 if (ret) {
2323 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2324 return ret;
2325 }
2326
2327 if (sta_id_r)
2328 *sta_id_r = sta_id;
2329
2330 spin_lock_irqsave(&priv->sta_lock, flags);
2331 priv->stations[sta_id].used |= IWL_STA_LOCAL;
2332 spin_unlock_irqrestore(&priv->sta_lock, flags);
2333
2334 return 0;
2335}
2303static int iwl3945_manage_ibss_station(struct iwl_priv *priv, 2336static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2304 struct ieee80211_vif *vif, bool add) 2337 struct ieee80211_vif *vif, bool add)
2305{ 2338{
@@ -2307,10 +2340,8 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2307 int ret; 2340 int ret;
2308 2341
2309 if (add) { 2342 if (add) {
2310 ret = iwl_add_bssid_station( 2343 ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
2311 priv, &priv->contexts[IWL_RXON_CTX_BSS], 2344 &vif_priv->ibss_bssid_sta_id);
2312 vif->bss_conf.bssid, false,
2313 &vif_priv->ibss_bssid_sta_id);
2314 if (ret) 2345 if (ret)
2315 return ret; 2346 return ret;
2316 2347
@@ -2421,7 +2452,7 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2421 } 2452 }
2422 2453
2423 /* Assign number of Usable TX queues */ 2454 /* Assign number of Usable TX queues */
2424 priv->hw_params.max_txq_num = priv->cfg->num_of_queues; 2455 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
2425 2456
2426 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); 2457 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2427 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K); 2458 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
@@ -2673,9 +2704,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2673 .dump_nic_error_log = iwl3945_dump_nic_error_log, 2704 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2674 .apm_ops = { 2705 .apm_ops = {
2675 .init = iwl3945_apm_init, 2706 .init = iwl3945_apm_init,
2676 .stop = iwl_apm_stop,
2677 .config = iwl3945_nic_config, 2707 .config = iwl3945_nic_config,
2678 .set_pwr_src = iwl3945_set_pwr_src,
2679 }, 2708 },
2680 .eeprom_ops = { 2709 .eeprom_ops = {
2681 .regulatory_bands = { 2710 .regulatory_bands = {
@@ -2687,7 +2716,6 @@ static struct iwl_lib_ops iwl3945_lib = {
2687 EEPROM_REGULATORY_BAND_NO_HT40, 2716 EEPROM_REGULATORY_BAND_NO_HT40,
2688 EEPROM_REGULATORY_BAND_NO_HT40, 2717 EEPROM_REGULATORY_BAND_NO_HT40,
2689 }, 2718 },
2690 .verify_signature = iwlcore_eeprom_verify_signature,
2691 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, 2719 .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
2692 .release_semaphore = iwl3945_eeprom_release_semaphore, 2720 .release_semaphore = iwl3945_eeprom_release_semaphore,
2693 .query_addr = iwlcore_eeprom_query_addr, 2721 .query_addr = iwlcore_eeprom_query_addr,
@@ -2713,6 +2741,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2713 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2741 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2714 .tx_cmd_protection = iwlcore_tx_cmd_protection, 2742 .tx_cmd_protection = iwlcore_tx_cmd_protection,
2715 .request_scan = iwl3945_request_scan, 2743 .request_scan = iwl3945_request_scan,
2744 .post_scan = iwl3945_post_scan,
2716}; 2745};
2717 2746
2718static const struct iwl_ops iwl3945_ops = { 2747static const struct iwl_ops iwl3945_ops = {
@@ -2722,22 +2751,13 @@ static const struct iwl_ops iwl3945_ops = {
2722 .led = &iwl3945_led_ops, 2751 .led = &iwl3945_led_ops,
2723}; 2752};
2724 2753
2725static struct iwl_cfg iwl3945_bg_cfg = { 2754static struct iwl_base_params iwl3945_base_params = {
2726 .name = "3945BG",
2727 .fw_name_pre = IWL3945_FW_PRE,
2728 .ucode_api_max = IWL3945_UCODE_API_MAX,
2729 .ucode_api_min = IWL3945_UCODE_API_MIN,
2730 .sku = IWL_SKU_G,
2731 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2755 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2732 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2733 .ops = &iwl3945_ops,
2734 .num_of_queues = IWL39_NUM_QUEUES, 2756 .num_of_queues = IWL39_NUM_QUEUES,
2735 .mod_params = &iwl3945_mod_params,
2736 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, 2757 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2737 .set_l0s = false, 2758 .set_l0s = false,
2738 .use_bsm = true, 2759 .use_bsm = true,
2739 .use_isr_legacy = true, 2760 .use_isr_legacy = true,
2740 .ht_greenfield_support = false,
2741 .led_compensation = 64, 2761 .led_compensation = 64,
2742 .broken_powersave = true, 2762 .broken_powersave = true,
2743 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2763 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
@@ -2746,25 +2766,28 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2746 .tx_power_by_driver = true, 2766 .tx_power_by_driver = true,
2747}; 2767};
2748 2768
2769static struct iwl_cfg iwl3945_bg_cfg = {
2770 .name = "3945BG",
2771 .fw_name_pre = IWL3945_FW_PRE,
2772 .ucode_api_max = IWL3945_UCODE_API_MAX,
2773 .ucode_api_min = IWL3945_UCODE_API_MIN,
2774 .sku = IWL_SKU_G,
2775 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2776 .ops = &iwl3945_ops,
2777 .mod_params = &iwl3945_mod_params,
2778 .base_params = &iwl3945_base_params,
2779};
2780
2749static struct iwl_cfg iwl3945_abg_cfg = { 2781static struct iwl_cfg iwl3945_abg_cfg = {
2750 .name = "3945ABG", 2782 .name = "3945ABG",
2751 .fw_name_pre = IWL3945_FW_PRE, 2783 .fw_name_pre = IWL3945_FW_PRE,
2752 .ucode_api_max = IWL3945_UCODE_API_MAX, 2784 .ucode_api_max = IWL3945_UCODE_API_MAX,
2753 .ucode_api_min = IWL3945_UCODE_API_MIN, 2785 .ucode_api_min = IWL3945_UCODE_API_MIN,
2754 .sku = IWL_SKU_A|IWL_SKU_G, 2786 .sku = IWL_SKU_A|IWL_SKU_G,
2755 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2756 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2787 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2757 .ops = &iwl3945_ops, 2788 .ops = &iwl3945_ops,
2758 .num_of_queues = IWL39_NUM_QUEUES,
2759 .mod_params = &iwl3945_mod_params, 2789 .mod_params = &iwl3945_mod_params,
2760 .use_isr_legacy = true, 2790 .base_params = &iwl3945_base_params,
2761 .ht_greenfield_support = false,
2762 .led_compensation = 64,
2763 .broken_powersave = true,
2764 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2765 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
2766 .max_event_log_size = 512,
2767 .tx_power_by_driver = true,
2768}; 2791};
2769 2792
2770DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { 2793DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index bb2aeebf3652..09391f0ee61f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -138,8 +138,6 @@ enum iwl3945_antenna {
138#define DEFAULT_SHORT_RETRY_LIMIT 7U 138#define DEFAULT_SHORT_RETRY_LIMIT 7U
139#define DEFAULT_LONG_RETRY_LIMIT 4U 139#define DEFAULT_LONG_RETRY_LIMIT 4U
140 140
141#include "iwl-agn-rs.h"
142
143#define IWL_TX_FIFO_AC0 0 141#define IWL_TX_FIFO_AC0 0
144#define IWL_TX_FIFO_AC1 1 142#define IWL_TX_FIFO_AC1 1
145#define IWL_TX_FIFO_AC2 2 143#define IWL_TX_FIFO_AC2 2
@@ -271,6 +269,9 @@ extern void iwl3945_post_associate(struct iwl_priv *priv,
271extern void iwl3945_config_ap(struct iwl_priv *priv, 269extern void iwl3945_config_ap(struct iwl_priv *priv,
272 struct ieee80211_vif *vif); 270 struct ieee80211_vif *vif);
273 271
272extern int iwl3945_commit_rxon(struct iwl_priv *priv,
273 struct iwl_rxon_context *ctx);
274
274/** 275/**
275 * iwl3945_hw_find_station - Find station id for a given BSSID 276 * iwl3945_hw_find_station - Find station id for a given BSSID
276 * @bssid: MAC address of station ID to find 277 * @bssid: MAC address of station ID to find
@@ -295,7 +296,11 @@ extern const struct iwl_channel_info *iwl3945_get_channel_info(
295extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); 296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
296 297
297/* scanning */ 298/* scanning */
298void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); 299int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
300void iwl3945_post_scan(struct iwl_priv *priv);
301
302/* rates */
303extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
299 304
300/* Requires full declaration of iwl_priv before including */ 305/* Requires full declaration of iwl_priv before including */
301#include "iwl-io.h" 306#include "iwl-io.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 1d6a46d4db59..b207e3e9299f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -43,7 +43,7 @@
43#include "iwl-core.h" 43#include "iwl-core.h"
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-calib.h" 46#include "iwl-agn-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn.h" 49#include "iwl-agn.h"
@@ -647,13 +647,13 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
647{ 647{
648 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 648 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
649 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES) 649 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
650 priv->cfg->num_of_queues = 650 priv->cfg->base_params->num_of_queues =
651 priv->cfg->mod_params->num_of_queues; 651 priv->cfg->mod_params->num_of_queues;
652 652
653 priv->hw_params.max_txq_num = priv->cfg->num_of_queues; 653 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
654 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; 654 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
655 priv->hw_params.scd_bc_tbls_size = 655 priv->hw_params.scd_bc_tbls_size =
656 priv->cfg->num_of_queues * 656 priv->cfg->base_params->num_of_queues *
657 sizeof(struct iwl4965_scd_bc_tbl); 657 sizeof(struct iwl4965_scd_bc_tbl);
658 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 658 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
659 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 659 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
@@ -669,8 +669,8 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
669 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 669 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
670 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 670 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
671 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 671 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
672 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 672
673 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); 673 iwl4965_set_ct_threshold(priv);
674 674
675 priv->hw_params.sens = &iwl4965_sensitivity; 675 priv->hw_params.sens = &iwl4965_sensitivity;
676 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 676 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
@@ -1724,13 +1724,13 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1724 u16 ssn_idx, u8 tx_fifo) 1724 u16 ssn_idx, u8 tx_fifo)
1725{ 1725{
1726 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1726 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1727 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 1727 (IWL49_FIRST_AMPDU_QUEUE +
1728 <= txq_id)) { 1728 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1729 IWL_WARN(priv, 1729 IWL_WARN(priv,
1730 "queue number out of range: %d, must be %d to %d\n", 1730 "queue number out of range: %d, must be %d to %d\n",
1731 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1731 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1732 IWL49_FIRST_AMPDU_QUEUE + 1732 IWL49_FIRST_AMPDU_QUEUE +
1733 priv->cfg->num_of_ampdu_queues - 1); 1733 priv->cfg->base_params->num_of_ampdu_queues - 1);
1734 return -EINVAL; 1734 return -EINVAL;
1735 } 1735 }
1736 1736
@@ -1792,13 +1792,13 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1792 int ret; 1792 int ret;
1793 1793
1794 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1794 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1795 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 1795 (IWL49_FIRST_AMPDU_QUEUE +
1796 <= txq_id)) { 1796 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
1797 IWL_WARN(priv, 1797 IWL_WARN(priv,
1798 "queue number out of range: %d, must be %d to %d\n", 1798 "queue number out of range: %d, must be %d to %d\n",
1799 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1799 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1800 IWL49_FIRST_AMPDU_QUEUE + 1800 IWL49_FIRST_AMPDU_QUEUE +
1801 priv->cfg->num_of_ampdu_queues - 1); 1801 priv->cfg->base_params->num_of_ampdu_queues - 1);
1802 return -EINVAL; 1802 return -EINVAL;
1803 } 1803 }
1804 1804
@@ -2216,11 +2216,23 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2216 2216
2217static struct iwl_hcmd_ops iwl4965_hcmd = { 2217static struct iwl_hcmd_ops iwl4965_hcmd = {
2218 .rxon_assoc = iwl4965_send_rxon_assoc, 2218 .rxon_assoc = iwl4965_send_rxon_assoc,
2219 .commit_rxon = iwl_commit_rxon, 2219 .commit_rxon = iwlagn_commit_rxon,
2220 .set_rxon_chain = iwl_set_rxon_chain, 2220 .set_rxon_chain = iwlagn_set_rxon_chain,
2221 .send_bt_config = iwl_send_bt_config, 2221 .send_bt_config = iwl_send_bt_config,
2222}; 2222};
2223 2223
2224static void iwl4965_post_scan(struct iwl_priv *priv)
2225{
2226 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2227
2228 /*
2229 * Since setting the RXON may have been deferred while
2230 * performing the scan, fire one off if needed
2231 */
2232 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2233 iwlcore_commit_rxon(priv, ctx);
2234}
2235
2224static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2236static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2225 .get_hcmd_size = iwl4965_get_hcmd_size, 2237 .get_hcmd_size = iwl4965_get_hcmd_size,
2226 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2238 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
@@ -2229,6 +2241,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2229 .tx_cmd_protection = iwlcore_tx_cmd_protection, 2241 .tx_cmd_protection = iwlcore_tx_cmd_protection,
2230 .calc_rssi = iwl4965_calc_rssi, 2242 .calc_rssi = iwl4965_calc_rssi,
2231 .request_scan = iwlagn_request_scan, 2243 .request_scan = iwlagn_request_scan,
2244 .post_scan = iwl4965_post_scan,
2232}; 2245};
2233 2246
2234static struct iwl_lib_ops iwl4965_lib = { 2247static struct iwl_lib_ops iwl4965_lib = {
@@ -2253,9 +2266,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2253 .set_channel_switch = iwl4965_hw_channel_switch, 2266 .set_channel_switch = iwl4965_hw_channel_switch,
2254 .apm_ops = { 2267 .apm_ops = {
2255 .init = iwl_apm_init, 2268 .init = iwl_apm_init,
2256 .stop = iwl_apm_stop,
2257 .config = iwl4965_nic_config, 2269 .config = iwl4965_nic_config,
2258 .set_pwr_src = iwl_set_pwr_src,
2259 }, 2270 },
2260 .eeprom_ops = { 2271 .eeprom_ops = {
2261 .regulatory_bands = { 2272 .regulatory_bands = {
@@ -2267,7 +2278,6 @@ static struct iwl_lib_ops iwl4965_lib = {
2267 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, 2278 EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
2268 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS 2279 EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
2269 }, 2280 },
2270 .verify_signature = iwlcore_eeprom_verify_signature,
2271 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2281 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
2272 .release_semaphore = iwlcore_eeprom_release_semaphore, 2282 .release_semaphore = iwlcore_eeprom_release_semaphore,
2273 .calib_version = iwl4965_eeprom_calib_version, 2283 .calib_version = iwl4965_eeprom_calib_version,
@@ -2280,7 +2290,6 @@ static struct iwl_lib_ops iwl4965_lib = {
2280 .isr = iwl_isr_legacy, 2290 .isr = iwl_isr_legacy,
2281 .temp_ops = { 2291 .temp_ops = {
2282 .temperature = iwl4965_temperature_calib, 2292 .temperature = iwl4965_temperature_calib,
2283 .set_ct_kill = iwl4965_set_ct_threshold,
2284 }, 2293 },
2285 .manage_ibss_station = iwlagn_manage_ibss_station, 2294 .manage_ibss_station = iwlagn_manage_ibss_station,
2286 .update_bcast_stations = iwl_update_bcast_stations, 2295 .update_bcast_stations = iwl_update_bcast_stations,
@@ -2289,6 +2298,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2289 .tx_stats_read = iwl_ucode_tx_stats_read, 2298 .tx_stats_read = iwl_ucode_tx_stats_read,
2290 .general_stats_read = iwl_ucode_general_stats_read, 2299 .general_stats_read = iwl_ucode_general_stats_read,
2291 .bt_stats_read = iwl_ucode_bt_stats_read, 2300 .bt_stats_read = iwl_ucode_bt_stats_read,
2301 .reply_tx_error = iwl_reply_tx_error_read,
2292 }, 2302 },
2293 .recover_from_tx_stall = iwl_bg_monitor_recover, 2303 .recover_from_tx_stall = iwl_bg_monitor_recover,
2294 .check_plcp_health = iwl_good_plcp_health, 2304 .check_plcp_health = iwl_good_plcp_health,
@@ -2301,26 +2311,14 @@ static const struct iwl_ops iwl4965_ops = {
2301 .led = &iwlagn_led_ops, 2311 .led = &iwlagn_led_ops,
2302}; 2312};
2303 2313
2304struct iwl_cfg iwl4965_agn_cfg = { 2314static struct iwl_base_params iwl4965_base_params = {
2305 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2306 .fw_name_pre = IWL4965_FW_PRE,
2307 .ucode_api_max = IWL4965_UCODE_API_MAX,
2308 .ucode_api_min = IWL4965_UCODE_API_MIN,
2309 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2310 .eeprom_size = IWL4965_EEPROM_IMG_SIZE, 2315 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
2311 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2312 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2313 .ops = &iwl4965_ops,
2314 .num_of_queues = IWL49_NUM_QUEUES, 2316 .num_of_queues = IWL49_NUM_QUEUES,
2315 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, 2317 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2316 .mod_params = &iwlagn_mod_params,
2317 .valid_tx_ant = ANT_AB,
2318 .valid_rx_ant = ANT_ABC,
2319 .pll_cfg_val = 0, 2318 .pll_cfg_val = 0,
2320 .set_l0s = true, 2319 .set_l0s = true,
2321 .use_bsm = true, 2320 .use_bsm = true,
2322 .use_isr_legacy = true, 2321 .use_isr_legacy = true,
2323 .ht_greenfield_support = false,
2324 .broken_powersave = true, 2322 .broken_powersave = true,
2325 .led_compensation = 61, 2323 .led_compensation = 61,
2326 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2324 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
@@ -2332,6 +2330,21 @@ struct iwl_cfg iwl4965_agn_cfg = {
2332 .ucode_tracing = true, 2330 .ucode_tracing = true,
2333 .sensitivity_calib_by_driver = true, 2331 .sensitivity_calib_by_driver = true,
2334 .chain_noise_calib_by_driver = true, 2332 .chain_noise_calib_by_driver = true,
2333};
2334
2335struct iwl_cfg iwl4965_agn_cfg = {
2336 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2337 .fw_name_pre = IWL4965_FW_PRE,
2338 .ucode_api_max = IWL4965_UCODE_API_MAX,
2339 .ucode_api_min = IWL4965_UCODE_API_MIN,
2340 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2341 .valid_tx_ant = ANT_AB,
2342 .valid_rx_ant = ANT_ABC,
2343 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2344 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2345 .ops = &iwl4965_ops,
2346 .mod_params = &iwlagn_mod_params,
2347 .base_params = &iwl4965_base_params,
2335 /* 2348 /*
2336 * Force use of chains B and C for scan RX on 5 GHz band 2349 * Force use of chains B and C for scan RX on 5 GHz band
2337 * because the device has off-channel reception on chain A. 2350 * because the device has off-channel reception on chain A.
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 1dbb1246c083..fd9fbc93ea1b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -170,13 +170,13 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
170{ 170{
171 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 171 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
172 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 172 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
173 priv->cfg->num_of_queues = 173 priv->cfg->base_params->num_of_queues =
174 priv->cfg->mod_params->num_of_queues; 174 priv->cfg->mod_params->num_of_queues;
175 175
176 priv->hw_params.max_txq_num = priv->cfg->num_of_queues; 176 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
177 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 177 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
178 priv->hw_params.scd_bc_tbls_size = 178 priv->hw_params.scd_bc_tbls_size =
179 priv->cfg->num_of_queues * 179 priv->cfg->base_params->num_of_queues *
180 sizeof(struct iwlagn_scd_bc_tbl); 180 sizeof(struct iwlagn_scd_bc_tbl);
181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
182 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 182 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
@@ -195,8 +195,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
195 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 195 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
196 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 196 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
197 197
198 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 198 iwl5000_set_ct_threshold(priv);
199 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
200 199
201 /* Set initial sensitivity parameters */ 200 /* Set initial sensitivity parameters */
202 /* Set initial calibration set */ 201 /* Set initial calibration set */
@@ -217,13 +216,13 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
217{ 216{
218 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 217 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
219 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 218 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
220 priv->cfg->num_of_queues = 219 priv->cfg->base_params->num_of_queues =
221 priv->cfg->mod_params->num_of_queues; 220 priv->cfg->mod_params->num_of_queues;
222 221
223 priv->hw_params.max_txq_num = priv->cfg->num_of_queues; 222 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
224 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 223 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
225 priv->hw_params.scd_bc_tbls_size = 224 priv->hw_params.scd_bc_tbls_size =
226 priv->cfg->num_of_queues * 225 priv->cfg->base_params->num_of_queues *
227 sizeof(struct iwlagn_scd_bc_tbl); 226 sizeof(struct iwlagn_scd_bc_tbl);
228 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 227 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
229 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 228 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
@@ -242,8 +241,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
242 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 241 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
243 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 242 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
244 243
245 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 244 iwl5150_set_ct_threshold(priv);
246 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
247 245
248 /* Set initial sensitivity parameters */ 246 /* Set initial sensitivity parameters */
249 /* Set initial calibration set */ 247 /* Set initial calibration set */
@@ -370,9 +368,7 @@ static struct iwl_lib_ops iwl5000_lib = {
370 .set_channel_switch = iwl5000_hw_channel_switch, 368 .set_channel_switch = iwl5000_hw_channel_switch,
371 .apm_ops = { 369 .apm_ops = {
372 .init = iwl_apm_init, 370 .init = iwl_apm_init,
373 .stop = iwl_apm_stop,
374 .config = iwl5000_nic_config, 371 .config = iwl5000_nic_config,
375 .set_pwr_src = iwl_set_pwr_src,
376 }, 372 },
377 .eeprom_ops = { 373 .eeprom_ops = {
378 .regulatory_bands = { 374 .regulatory_bands = {
@@ -384,7 +380,6 @@ static struct iwl_lib_ops iwl5000_lib = {
384 EEPROM_REG_BAND_24_HT40_CHANNELS, 380 EEPROM_REG_BAND_24_HT40_CHANNELS,
385 EEPROM_REG_BAND_52_HT40_CHANNELS 381 EEPROM_REG_BAND_52_HT40_CHANNELS
386 }, 382 },
387 .verify_signature = iwlcore_eeprom_verify_signature,
388 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 383 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
389 .release_semaphore = iwlcore_eeprom_release_semaphore, 384 .release_semaphore = iwlcore_eeprom_release_semaphore,
390 .calib_version = iwlagn_eeprom_calib_version, 385 .calib_version = iwlagn_eeprom_calib_version,
@@ -395,7 +390,6 @@ static struct iwl_lib_ops iwl5000_lib = {
395 .config_ap = iwl_config_ap, 390 .config_ap = iwl_config_ap,
396 .temp_ops = { 391 .temp_ops = {
397 .temperature = iwlagn_temperature, 392 .temperature = iwlagn_temperature,
398 .set_ct_kill = iwl5000_set_ct_threshold,
399 }, 393 },
400 .manage_ibss_station = iwlagn_manage_ibss_station, 394 .manage_ibss_station = iwlagn_manage_ibss_station,
401 .update_bcast_stations = iwl_update_bcast_stations, 395 .update_bcast_stations = iwl_update_bcast_stations,
@@ -404,6 +398,7 @@ static struct iwl_lib_ops iwl5000_lib = {
404 .tx_stats_read = iwl_ucode_tx_stats_read, 398 .tx_stats_read = iwl_ucode_tx_stats_read,
405 .general_stats_read = iwl_ucode_general_stats_read, 399 .general_stats_read = iwl_ucode_general_stats_read,
406 .bt_stats_read = iwl_ucode_bt_stats_read, 400 .bt_stats_read = iwl_ucode_bt_stats_read,
401 .reply_tx_error = iwl_reply_tx_error_read,
407 }, 402 },
408 .recover_from_tx_stall = iwl_bg_monitor_recover, 403 .recover_from_tx_stall = iwl_bg_monitor_recover,
409 .check_plcp_health = iwl_good_plcp_health, 404 .check_plcp_health = iwl_good_plcp_health,
@@ -441,9 +436,7 @@ static struct iwl_lib_ops iwl5150_lib = {
441 .set_channel_switch = iwl5000_hw_channel_switch, 436 .set_channel_switch = iwl5000_hw_channel_switch,
442 .apm_ops = { 437 .apm_ops = {
443 .init = iwl_apm_init, 438 .init = iwl_apm_init,
444 .stop = iwl_apm_stop,
445 .config = iwl5000_nic_config, 439 .config = iwl5000_nic_config,
446 .set_pwr_src = iwl_set_pwr_src,
447 }, 440 },
448 .eeprom_ops = { 441 .eeprom_ops = {
449 .regulatory_bands = { 442 .regulatory_bands = {
@@ -455,7 +448,6 @@ static struct iwl_lib_ops iwl5150_lib = {
455 EEPROM_REG_BAND_24_HT40_CHANNELS, 448 EEPROM_REG_BAND_24_HT40_CHANNELS,
456 EEPROM_REG_BAND_52_HT40_CHANNELS 449 EEPROM_REG_BAND_52_HT40_CHANNELS
457 }, 450 },
458 .verify_signature = iwlcore_eeprom_verify_signature,
459 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 451 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
460 .release_semaphore = iwlcore_eeprom_release_semaphore, 452 .release_semaphore = iwlcore_eeprom_release_semaphore,
461 .calib_version = iwlagn_eeprom_calib_version, 453 .calib_version = iwlagn_eeprom_calib_version,
@@ -466,7 +458,6 @@ static struct iwl_lib_ops iwl5150_lib = {
466 .config_ap = iwl_config_ap, 458 .config_ap = iwl_config_ap,
467 .temp_ops = { 459 .temp_ops = {
468 .temperature = iwl5150_temperature, 460 .temperature = iwl5150_temperature,
469 .set_ct_kill = iwl5150_set_ct_threshold,
470 }, 461 },
471 .manage_ibss_station = iwlagn_manage_ibss_station, 462 .manage_ibss_station = iwlagn_manage_ibss_station,
472 .update_bcast_stations = iwl_update_bcast_stations, 463 .update_bcast_stations = iwl_update_bcast_stations,
@@ -474,6 +465,8 @@ static struct iwl_lib_ops iwl5150_lib = {
474 .rx_stats_read = iwl_ucode_rx_stats_read, 465 .rx_stats_read = iwl_ucode_rx_stats_read,
475 .tx_stats_read = iwl_ucode_tx_stats_read, 466 .tx_stats_read = iwl_ucode_tx_stats_read,
476 .general_stats_read = iwl_ucode_general_stats_read, 467 .general_stats_read = iwl_ucode_general_stats_read,
468 .bt_stats_read = iwl_ucode_bt_stats_read,
469 .reply_tx_error = iwl_reply_tx_error_read,
477 }, 470 },
478 .recover_from_tx_stall = iwl_bg_monitor_recover, 471 .recover_from_tx_stall = iwl_bg_monitor_recover,
479 .check_plcp_health = iwl_good_plcp_health, 472 .check_plcp_health = iwl_good_plcp_health,
@@ -501,27 +494,14 @@ static const struct iwl_ops iwl5150_ops = {
501 .led = &iwlagn_led_ops, 494 .led = &iwlagn_led_ops,
502}; 495};
503 496
504struct iwl_cfg iwl5300_agn_cfg = { 497static struct iwl_base_params iwl5000_base_params = {
505 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
506 .fw_name_pre = IWL5000_FW_PRE,
507 .ucode_api_max = IWL5000_UCODE_API_MAX,
508 .ucode_api_min = IWL5000_UCODE_API_MIN,
509 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
510 .ops = &iwl5000_ops,
511 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 498 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
512 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
513 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
514 .num_of_queues = IWLAGN_NUM_QUEUES, 499 .num_of_queues = IWLAGN_NUM_QUEUES,
515 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 500 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
516 .mod_params = &iwlagn_mod_params,
517 .valid_tx_ant = ANT_ABC,
518 .valid_rx_ant = ANT_ABC,
519 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 501 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
520 .set_l0s = true, 502 .set_l0s = true,
521 .use_bsm = false, 503 .use_bsm = false,
522 .ht_greenfield_support = true,
523 .led_compensation = 51, 504 .led_compensation = 51,
524 .use_rts_for_aggregation = true, /* use rts/cts protection */
525 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 505 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
526 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 506 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
527 .chain_noise_scale = 1000, 507 .chain_noise_scale = 1000,
@@ -531,6 +511,26 @@ struct iwl_cfg iwl5300_agn_cfg = {
531 .sensitivity_calib_by_driver = true, 511 .sensitivity_calib_by_driver = true,
532 .chain_noise_calib_by_driver = true, 512 .chain_noise_calib_by_driver = true,
533}; 513};
514static struct iwl_ht_params iwl5000_ht_params = {
515 .ht_greenfield_support = true,
516 .use_rts_for_aggregation = true, /* use rts/cts protection */
517};
518
519struct iwl_cfg iwl5300_agn_cfg = {
520 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
521 .fw_name_pre = IWL5000_FW_PRE,
522 .ucode_api_max = IWL5000_UCODE_API_MAX,
523 .ucode_api_min = IWL5000_UCODE_API_MIN,
524 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
525 .valid_tx_ant = ANT_ABC,
526 .valid_rx_ant = ANT_ABC,
527 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
528 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
529 .ops = &iwl5000_ops,
530 .mod_params = &iwlagn_mod_params,
531 .base_params = &iwl5000_base_params,
532 .ht_params = &iwl5000_ht_params,
533};
534 534
535struct iwl_cfg iwl5100_bgn_cfg = { 535struct iwl_cfg iwl5100_bgn_cfg = {
536 .name = "Intel(R) WiFi Link 5100 BGN", 536 .name = "Intel(R) WiFi Link 5100 BGN",
@@ -538,29 +538,14 @@ struct iwl_cfg iwl5100_bgn_cfg = {
538 .ucode_api_max = IWL5000_UCODE_API_MAX, 538 .ucode_api_max = IWL5000_UCODE_API_MAX,
539 .ucode_api_min = IWL5000_UCODE_API_MIN, 539 .ucode_api_min = IWL5000_UCODE_API_MIN,
540 .sku = IWL_SKU_G|IWL_SKU_N, 540 .sku = IWL_SKU_G|IWL_SKU_N,
541 .ops = &iwl5000_ops, 541 .valid_tx_ant = ANT_B,
542 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 542 .valid_rx_ant = ANT_AB,
543 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 543 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
544 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 544 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
545 .num_of_queues = IWLAGN_NUM_QUEUES, 545 .ops = &iwl5000_ops,
546 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
547 .mod_params = &iwlagn_mod_params, 546 .mod_params = &iwlagn_mod_params,
548 .valid_tx_ant = ANT_B, 547 .base_params = &iwl5000_base_params,
549 .valid_rx_ant = ANT_AB, 548 .ht_params = &iwl5000_ht_params,
550 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
551 .set_l0s = true,
552 .use_bsm = false,
553 .ht_greenfield_support = true,
554 .led_compensation = 51,
555 .use_rts_for_aggregation = true, /* use rts/cts protection */
556 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
557 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
558 .chain_noise_scale = 1000,
559 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
560 .max_event_log_size = 512,
561 .ucode_tracing = true,
562 .sensitivity_calib_by_driver = true,
563 .chain_noise_calib_by_driver = true,
564}; 549};
565 550
566struct iwl_cfg iwl5100_abg_cfg = { 551struct iwl_cfg iwl5100_abg_cfg = {
@@ -569,27 +554,13 @@ struct iwl_cfg iwl5100_abg_cfg = {
569 .ucode_api_max = IWL5000_UCODE_API_MAX, 554 .ucode_api_max = IWL5000_UCODE_API_MAX,
570 .ucode_api_min = IWL5000_UCODE_API_MIN, 555 .ucode_api_min = IWL5000_UCODE_API_MIN,
571 .sku = IWL_SKU_A|IWL_SKU_G, 556 .sku = IWL_SKU_A|IWL_SKU_G,
572 .ops = &iwl5000_ops, 557 .valid_tx_ant = ANT_B,
573 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 558 .valid_rx_ant = ANT_AB,
574 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 559 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
575 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 560 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
576 .num_of_queues = IWLAGN_NUM_QUEUES, 561 .ops = &iwl5000_ops,
577 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
578 .mod_params = &iwlagn_mod_params, 562 .mod_params = &iwlagn_mod_params,
579 .valid_tx_ant = ANT_B, 563 .base_params = &iwl5000_base_params,
580 .valid_rx_ant = ANT_AB,
581 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
582 .set_l0s = true,
583 .use_bsm = false,
584 .led_compensation = 51,
585 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
586 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
587 .chain_noise_scale = 1000,
588 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
589 .max_event_log_size = 512,
590 .ucode_tracing = true,
591 .sensitivity_calib_by_driver = true,
592 .chain_noise_calib_by_driver = true,
593}; 564};
594 565
595struct iwl_cfg iwl5100_agn_cfg = { 566struct iwl_cfg iwl5100_agn_cfg = {
@@ -598,29 +569,14 @@ struct iwl_cfg iwl5100_agn_cfg = {
598 .ucode_api_max = IWL5000_UCODE_API_MAX, 569 .ucode_api_max = IWL5000_UCODE_API_MAX,
599 .ucode_api_min = IWL5000_UCODE_API_MIN, 570 .ucode_api_min = IWL5000_UCODE_API_MIN,
600 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 571 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
601 .ops = &iwl5000_ops, 572 .valid_tx_ant = ANT_B,
602 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 573 .valid_rx_ant = ANT_AB,
603 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 574 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
604 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 575 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
605 .num_of_queues = IWLAGN_NUM_QUEUES, 576 .ops = &iwl5000_ops,
606 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
607 .mod_params = &iwlagn_mod_params, 577 .mod_params = &iwlagn_mod_params,
608 .valid_tx_ant = ANT_B, 578 .base_params = &iwl5000_base_params,
609 .valid_rx_ant = ANT_AB, 579 .ht_params = &iwl5000_ht_params,
610 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
611 .set_l0s = true,
612 .use_bsm = false,
613 .ht_greenfield_support = true,
614 .led_compensation = 51,
615 .use_rts_for_aggregation = true, /* use rts/cts protection */
616 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
617 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
618 .chain_noise_scale = 1000,
619 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
620 .max_event_log_size = 512,
621 .ucode_tracing = true,
622 .sensitivity_calib_by_driver = true,
623 .chain_noise_calib_by_driver = true,
624}; 580};
625 581
626struct iwl_cfg iwl5350_agn_cfg = { 582struct iwl_cfg iwl5350_agn_cfg = {
@@ -629,29 +585,14 @@ struct iwl_cfg iwl5350_agn_cfg = {
629 .ucode_api_max = IWL5000_UCODE_API_MAX, 585 .ucode_api_max = IWL5000_UCODE_API_MAX,
630 .ucode_api_min = IWL5000_UCODE_API_MIN, 586 .ucode_api_min = IWL5000_UCODE_API_MIN,
631 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 587 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
632 .ops = &iwl5000_ops, 588 .valid_tx_ant = ANT_ABC,
633 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 589 .valid_rx_ant = ANT_ABC,
634 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 590 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
635 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 591 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
636 .num_of_queues = IWLAGN_NUM_QUEUES, 592 .ops = &iwl5000_ops,
637 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
638 .mod_params = &iwlagn_mod_params, 593 .mod_params = &iwlagn_mod_params,
639 .valid_tx_ant = ANT_ABC, 594 .base_params = &iwl5000_base_params,
640 .valid_rx_ant = ANT_ABC, 595 .ht_params = &iwl5000_ht_params,
641 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
642 .set_l0s = true,
643 .use_bsm = false,
644 .ht_greenfield_support = true,
645 .led_compensation = 51,
646 .use_rts_for_aggregation = true, /* use rts/cts protection */
647 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
648 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
649 .chain_noise_scale = 1000,
650 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
651 .max_event_log_size = 512,
652 .ucode_tracing = true,
653 .sensitivity_calib_by_driver = true,
654 .chain_noise_calib_by_driver = true,
655}; 596};
656 597
657struct iwl_cfg iwl5150_agn_cfg = { 598struct iwl_cfg iwl5150_agn_cfg = {
@@ -660,29 +601,14 @@ struct iwl_cfg iwl5150_agn_cfg = {
660 .ucode_api_max = IWL5150_UCODE_API_MAX, 601 .ucode_api_max = IWL5150_UCODE_API_MAX,
661 .ucode_api_min = IWL5150_UCODE_API_MIN, 602 .ucode_api_min = IWL5150_UCODE_API_MIN,
662 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 603 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
663 .ops = &iwl5150_ops, 604 .valid_tx_ant = ANT_A,
664 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 605 .valid_rx_ant = ANT_AB,
665 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 606 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
666 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 607 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
667 .num_of_queues = IWLAGN_NUM_QUEUES, 608 .ops = &iwl5150_ops,
668 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
669 .mod_params = &iwlagn_mod_params, 609 .mod_params = &iwlagn_mod_params,
670 .valid_tx_ant = ANT_A, 610 .base_params = &iwl5000_base_params,
671 .valid_rx_ant = ANT_AB, 611 .ht_params = &iwl5000_ht_params,
672 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
673 .set_l0s = true,
674 .use_bsm = false,
675 .ht_greenfield_support = true,
676 .led_compensation = 51,
677 .use_rts_for_aggregation = true, /* use rts/cts protection */
678 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
679 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
680 .chain_noise_scale = 1000,
681 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
682 .max_event_log_size = 512,
683 .ucode_tracing = true,
684 .sensitivity_calib_by_driver = true,
685 .chain_noise_calib_by_driver = true,
686 .need_dc_calib = true, 612 .need_dc_calib = true,
687}; 613};
688 614
@@ -692,27 +618,13 @@ struct iwl_cfg iwl5150_abg_cfg = {
692 .ucode_api_max = IWL5150_UCODE_API_MAX, 618 .ucode_api_max = IWL5150_UCODE_API_MAX,
693 .ucode_api_min = IWL5150_UCODE_API_MIN, 619 .ucode_api_min = IWL5150_UCODE_API_MIN,
694 .sku = IWL_SKU_A|IWL_SKU_G, 620 .sku = IWL_SKU_A|IWL_SKU_G,
695 .ops = &iwl5150_ops, 621 .valid_tx_ant = ANT_A,
696 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 622 .valid_rx_ant = ANT_AB,
697 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 623 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
698 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 624 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
699 .num_of_queues = IWLAGN_NUM_QUEUES, 625 .ops = &iwl5150_ops,
700 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
701 .mod_params = &iwlagn_mod_params, 626 .mod_params = &iwlagn_mod_params,
702 .valid_tx_ant = ANT_A, 627 .base_params = &iwl5000_base_params,
703 .valid_rx_ant = ANT_AB,
704 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
705 .set_l0s = true,
706 .use_bsm = false,
707 .led_compensation = 51,
708 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
709 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
710 .chain_noise_scale = 1000,
711 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
712 .max_event_log_size = 512,
713 .ucode_tracing = true,
714 .sensitivity_calib_by_driver = true,
715 .chain_noise_calib_by_driver = true,
716 .need_dc_calib = true, 628 .need_dc_calib = true,
717}; 629};
718 630
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 2fdba088bd27..11e6532fc573 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -51,13 +51,15 @@
51 51
52/* Highest firmware API version supported */ 52/* Highest firmware API version supported */
53#define IWL6000_UCODE_API_MAX 4 53#define IWL6000_UCODE_API_MAX 4
54#define IWL6050_UCODE_API_MAX 4 54#define IWL6050_UCODE_API_MAX 5
55#define IWL6000G2_UCODE_API_MAX 5 55#define IWL6000G2_UCODE_API_MAX 5
56#define IWL130_UCODE_API_MAX 5
56 57
57/* Lowest firmware API version supported */ 58/* Lowest firmware API version supported */
58#define IWL6000_UCODE_API_MIN 4 59#define IWL6000_UCODE_API_MIN 4
59#define IWL6050_UCODE_API_MIN 4 60#define IWL6050_UCODE_API_MIN 4
60#define IWL6000G2_UCODE_API_MIN 4 61#define IWL6000G2_UCODE_API_MIN 4
62#define IWL130_UCODE_API_MIN 5
61 63
62#define IWL6000_FW_PRE "iwlwifi-6000-" 64#define IWL6000_FW_PRE "iwlwifi-6000-"
63#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 65#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -75,6 +77,9 @@
75#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" 77#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
76#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) 78#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
77 79
80#define IWL130_FW_PRE "iwlwifi-130-"
81#define _IWL130_MODULE_FIRMWARE(api) IWL130_FW_PRE #api ".ucode"
82#define IWL130_MODULE_FIRMWARE(api) _IWL130_MODULE_FIRMWARE(api)
78 83
79static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 84static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
80{ 85{
@@ -83,15 +88,24 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
83 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; 88 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
84} 89}
85 90
86/* Indicate calibration version to uCode. */ 91static void iwl6050_additional_nic_config(struct iwl_priv *priv)
87static void iwl6000_set_calib_version(struct iwl_priv *priv)
88{ 92{
89 if (priv->cfg->need_dc_calib && 93 /* Indicate calibration version to uCode. */
90 (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)) 94 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
91 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 95 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
92 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 96 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
93} 97}
94 98
99static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
100{
101 /* Indicate calibration version to uCode. */
102 if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
103 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
104 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
105 iwl_set_bit(priv, CSR_GP_DRIVER_REG,
106 CSR_GP_DRIVER_REG_BIT_6050_1x2);
107}
108
95/* NIC configuration for 6000 series */ 109/* NIC configuration for 6000 series */
96static void iwl6000_nic_config(struct iwl_priv *priv) 110static void iwl6000_nic_config(struct iwl_priv *priv)
97{ 111{
@@ -117,9 +131,11 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
117 iwl_write32(priv, CSR_GP_DRIVER_REG, 131 iwl_write32(priv, CSR_GP_DRIVER_REG,
118 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); 132 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
119 } 133 }
120 /* else do nothing, uCode configured */ 134 /* do additional nic configuration if needed */
121 if (priv->cfg->ops->lib->temp_ops.set_calib_version) 135 if (priv->cfg->ops->nic &&
122 priv->cfg->ops->lib->temp_ops.set_calib_version(priv); 136 priv->cfg->ops->nic->additional_nic_config) {
137 priv->cfg->ops->nic->additional_nic_config(priv);
138 }
123} 139}
124 140
125static struct iwl_sensitivity_ranges iwl6000_sensitivity = { 141static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -151,13 +167,13 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
151{ 167{
152 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 168 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
153 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) 169 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
154 priv->cfg->num_of_queues = 170 priv->cfg->base_params->num_of_queues =
155 priv->cfg->mod_params->num_of_queues; 171 priv->cfg->mod_params->num_of_queues;
156 172
157 priv->hw_params.max_txq_num = priv->cfg->num_of_queues; 173 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
158 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 174 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
159 priv->hw_params.scd_bc_tbls_size = 175 priv->hw_params.scd_bc_tbls_size =
160 priv->cfg->num_of_queues * 176 priv->cfg->base_params->num_of_queues *
161 sizeof(struct iwlagn_scd_bc_tbl); 177 sizeof(struct iwlagn_scd_bc_tbl);
162 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 178 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
163 priv->hw_params.max_stations = IWLAGN_STATION_COUNT; 179 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
@@ -176,8 +192,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
176 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 192 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
177 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 193 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
178 194
179 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 195 iwl6000_set_ct_threshold(priv);
180 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
181 196
182 /* Set initial sensitivity parameters */ 197 /* Set initial sensitivity parameters */
183 /* Set initial calibration set */ 198 /* Set initial calibration set */
@@ -188,7 +203,9 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
188 BIT(IWL_CALIB_TX_IQ) | 203 BIT(IWL_CALIB_TX_IQ) |
189 BIT(IWL_CALIB_BASE_BAND); 204 BIT(IWL_CALIB_BASE_BAND);
190 if (priv->cfg->need_dc_calib) 205 if (priv->cfg->need_dc_calib)
191 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC); 206 priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
207 if (priv->cfg->need_temp_offset_calib)
208 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
192 209
193 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 210 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
194 211
@@ -293,9 +310,7 @@ static struct iwl_lib_ops iwl6000_lib = {
293 .set_channel_switch = iwl6000_hw_channel_switch, 310 .set_channel_switch = iwl6000_hw_channel_switch,
294 .apm_ops = { 311 .apm_ops = {
295 .init = iwl_apm_init, 312 .init = iwl_apm_init,
296 .stop = iwl_apm_stop,
297 .config = iwl6000_nic_config, 313 .config = iwl6000_nic_config,
298 .set_pwr_src = iwl_set_pwr_src,
299 }, 314 },
300 .eeprom_ops = { 315 .eeprom_ops = {
301 .regulatory_bands = { 316 .regulatory_bands = {
@@ -307,7 +322,6 @@ static struct iwl_lib_ops iwl6000_lib = {
307 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 322 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
308 EEPROM_REG_BAND_52_HT40_CHANNELS 323 EEPROM_REG_BAND_52_HT40_CHANNELS
309 }, 324 },
310 .verify_signature = iwlcore_eeprom_verify_signature,
311 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 325 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
312 .release_semaphore = iwlcore_eeprom_release_semaphore, 326 .release_semaphore = iwlcore_eeprom_release_semaphore,
313 .calib_version = iwlagn_eeprom_calib_version, 327 .calib_version = iwlagn_eeprom_calib_version,
@@ -319,8 +333,6 @@ static struct iwl_lib_ops iwl6000_lib = {
319 .config_ap = iwl_config_ap, 333 .config_ap = iwl_config_ap,
320 .temp_ops = { 334 .temp_ops = {
321 .temperature = iwlagn_temperature, 335 .temperature = iwlagn_temperature,
322 .set_ct_kill = iwl6000_set_ct_threshold,
323 .set_calib_version = iwl6000_set_calib_version,
324 }, 336 },
325 .manage_ibss_station = iwlagn_manage_ibss_station, 337 .manage_ibss_station = iwlagn_manage_ibss_station,
326 .update_bcast_stations = iwl_update_bcast_stations, 338 .update_bcast_stations = iwl_update_bcast_stations,
@@ -329,6 +341,7 @@ static struct iwl_lib_ops iwl6000_lib = {
329 .tx_stats_read = iwl_ucode_tx_stats_read, 341 .tx_stats_read = iwl_ucode_tx_stats_read,
330 .general_stats_read = iwl_ucode_general_stats_read, 342 .general_stats_read = iwl_ucode_general_stats_read,
331 .bt_stats_read = iwl_ucode_bt_stats_read, 343 .bt_stats_read = iwl_ucode_bt_stats_read,
344 .reply_tx_error = iwl_reply_tx_error_read,
332 }, 345 },
333 .recover_from_tx_stall = iwl_bg_monitor_recover, 346 .recover_from_tx_stall = iwl_bg_monitor_recover,
334 .check_plcp_health = iwl_good_plcp_health, 347 .check_plcp_health = iwl_good_plcp_health,
@@ -368,9 +381,7 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
368 .set_channel_switch = iwl6000_hw_channel_switch, 381 .set_channel_switch = iwl6000_hw_channel_switch,
369 .apm_ops = { 382 .apm_ops = {
370 .init = iwl_apm_init, 383 .init = iwl_apm_init,
371 .stop = iwl_apm_stop,
372 .config = iwl6000_nic_config, 384 .config = iwl6000_nic_config,
373 .set_pwr_src = iwl_set_pwr_src,
374 }, 385 },
375 .eeprom_ops = { 386 .eeprom_ops = {
376 .regulatory_bands = { 387 .regulatory_bands = {
@@ -382,7 +393,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
382 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 393 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
383 EEPROM_REG_BAND_52_HT40_CHANNELS 394 EEPROM_REG_BAND_52_HT40_CHANNELS
384 }, 395 },
385 .verify_signature = iwlcore_eeprom_verify_signature,
386 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 396 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
387 .release_semaphore = iwlcore_eeprom_release_semaphore, 397 .release_semaphore = iwlcore_eeprom_release_semaphore,
388 .calib_version = iwlagn_eeprom_calib_version, 398 .calib_version = iwlagn_eeprom_calib_version,
@@ -394,8 +404,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
394 .config_ap = iwl_config_ap, 404 .config_ap = iwl_config_ap,
395 .temp_ops = { 405 .temp_ops = {
396 .temperature = iwlagn_temperature, 406 .temperature = iwlagn_temperature,
397 .set_ct_kill = iwl6000_set_ct_threshold,
398 .set_calib_version = iwl6000_set_calib_version,
399 }, 407 },
400 .manage_ibss_station = iwlagn_manage_ibss_station, 408 .manage_ibss_station = iwlagn_manage_ibss_station,
401 .update_bcast_stations = iwl_update_bcast_stations, 409 .update_bcast_stations = iwl_update_bcast_stations,
@@ -404,6 +412,7 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
404 .tx_stats_read = iwl_ucode_tx_stats_read, 412 .tx_stats_read = iwl_ucode_tx_stats_read,
405 .general_stats_read = iwl_ucode_general_stats_read, 413 .general_stats_read = iwl_ucode_general_stats_read,
406 .bt_stats_read = iwl_ucode_bt_stats_read, 414 .bt_stats_read = iwl_ucode_bt_stats_read,
415 .reply_tx_error = iwl_reply_tx_error_read,
407 }, 416 },
408 .recover_from_tx_stall = iwl_bg_monitor_recover, 417 .recover_from_tx_stall = iwl_bg_monitor_recover,
409 .check_plcp_health = iwl_good_plcp_health, 418 .check_plcp_health = iwl_good_plcp_health,
@@ -417,6 +426,14 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
417 } 426 }
418}; 427};
419 428
429static struct iwl_nic_ops iwl6050_nic_ops = {
430 .additional_nic_config = &iwl6050_additional_nic_config,
431};
432
433static struct iwl_nic_ops iwl6050g2_nic_ops = {
434 .additional_nic_config = &iwl6050g2_additional_nic_config,
435};
436
420static const struct iwl_ops iwl6000_ops = { 437static const struct iwl_ops iwl6000_ops = {
421 .lib = &iwl6000_lib, 438 .lib = &iwl6000_lib,
422 .hcmd = &iwlagn_hcmd, 439 .hcmd = &iwlagn_hcmd,
@@ -424,6 +441,22 @@ static const struct iwl_ops iwl6000_ops = {
424 .led = &iwlagn_led_ops, 441 .led = &iwlagn_led_ops,
425}; 442};
426 443
444static const struct iwl_ops iwl6050_ops = {
445 .lib = &iwl6000_lib,
446 .hcmd = &iwlagn_hcmd,
447 .utils = &iwlagn_hcmd_utils,
448 .led = &iwlagn_led_ops,
449 .nic = &iwl6050_nic_ops,
450};
451
452static const struct iwl_ops iwl6050g2_ops = {
453 .lib = &iwl6000_lib,
454 .hcmd = &iwlagn_hcmd,
455 .utils = &iwlagn_hcmd_utils,
456 .led = &iwlagn_led_ops,
457 .nic = &iwl6050g2_nic_ops,
458};
459
427static const struct iwl_ops iwl6000g2b_ops = { 460static const struct iwl_ops iwl6000g2b_ops = {
428 .lib = &iwl6000g2b_lib, 461 .lib = &iwl6000g2b_lib,
429 .hcmd = &iwlagn_bt_hcmd, 462 .hcmd = &iwlagn_bt_hcmd,
@@ -431,30 +464,16 @@ static const struct iwl_ops iwl6000g2b_ops = {
431 .led = &iwlagn_led_ops, 464 .led = &iwlagn_led_ops,
432}; 465};
433 466
434struct iwl_cfg iwl6000g2a_2agn_cfg = { 467static struct iwl_base_params iwl6000_base_params = {
435 .name = "6000 Series 2x2 AGN Gen2a",
436 .fw_name_pre = IWL6000G2A_FW_PRE,
437 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
438 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
439 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
440 .ops = &iwl6000_ops,
441 .eeprom_size = OTP_LOW_IMAGE_SIZE, 468 .eeprom_size = OTP_LOW_IMAGE_SIZE,
442 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
443 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
444 .num_of_queues = IWLAGN_NUM_QUEUES, 469 .num_of_queues = IWLAGN_NUM_QUEUES,
445 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 470 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
446 .mod_params = &iwlagn_mod_params,
447 .valid_tx_ant = ANT_AB,
448 .valid_rx_ant = ANT_AB,
449 .pll_cfg_val = 0, 471 .pll_cfg_val = 0,
450 .set_l0s = true, 472 .set_l0s = true,
451 .use_bsm = false, 473 .use_bsm = false,
452 .pa_type = IWL_PA_SYSTEM,
453 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 474 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
454 .shadow_ram_support = true, 475 .shadow_ram_support = true,
455 .ht_greenfield_support = true,
456 .led_compensation = 51, 476 .led_compensation = 51,
457 .use_rts_for_aggregation = true, /* use rts/cts protection */
458 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 477 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
459 .supports_idle = true, 478 .supports_idle = true,
460 .adv_thermal_throttle = true, 479 .adv_thermal_throttle = true,
@@ -466,29 +485,16 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
466 .ucode_tracing = true, 485 .ucode_tracing = true,
467 .sensitivity_calib_by_driver = true, 486 .sensitivity_calib_by_driver = true,
468 .chain_noise_calib_by_driver = true, 487 .chain_noise_calib_by_driver = true,
469 .need_dc_calib = true,
470}; 488};
471 489
472struct iwl_cfg iwl6000g2a_2abg_cfg = { 490static struct iwl_base_params iwl6050_base_params = {
473 .name = "6000 Series 2x2 ABG Gen2a",
474 .fw_name_pre = IWL6000G2A_FW_PRE,
475 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
476 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
477 .sku = IWL_SKU_A|IWL_SKU_G,
478 .ops = &iwl6000_ops,
479 .eeprom_size = OTP_LOW_IMAGE_SIZE, 491 .eeprom_size = OTP_LOW_IMAGE_SIZE,
480 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
481 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
482 .num_of_queues = IWLAGN_NUM_QUEUES, 492 .num_of_queues = IWLAGN_NUM_QUEUES,
483 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 493 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
484 .mod_params = &iwlagn_mod_params,
485 .valid_tx_ant = ANT_AB,
486 .valid_rx_ant = ANT_AB,
487 .pll_cfg_val = 0, 494 .pll_cfg_val = 0,
488 .set_l0s = true, 495 .set_l0s = true,
489 .use_bsm = false, 496 .use_bsm = false,
490 .pa_type = IWL_PA_SYSTEM, 497 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
491 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
492 .shadow_ram_support = true, 498 .shadow_ram_support = true,
493 .led_compensation = 51, 499 .led_compensation = 51,
494 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 500 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
@@ -496,33 +502,20 @@ struct iwl_cfg iwl6000g2a_2abg_cfg = {
496 .adv_thermal_throttle = true, 502 .adv_thermal_throttle = true,
497 .support_ct_kill_exit = true, 503 .support_ct_kill_exit = true,
498 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 504 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
499 .chain_noise_scale = 1000, 505 .chain_noise_scale = 1500,
500 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 506 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
501 .max_event_log_size = 512, 507 .max_event_log_size = 1024,
508 .ucode_tracing = true,
502 .sensitivity_calib_by_driver = true, 509 .sensitivity_calib_by_driver = true,
503 .chain_noise_calib_by_driver = true, 510 .chain_noise_calib_by_driver = true,
504 .need_dc_calib = true,
505}; 511};
506 512static struct iwl_base_params iwl6000_coex_base_params = {
507struct iwl_cfg iwl6000g2a_2bg_cfg = {
508 .name = "6000 Series 2x2 BG Gen2a",
509 .fw_name_pre = IWL6000G2A_FW_PRE,
510 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
511 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
512 .sku = IWL_SKU_G,
513 .ops = &iwl6000_ops,
514 .eeprom_size = OTP_LOW_IMAGE_SIZE, 513 .eeprom_size = OTP_LOW_IMAGE_SIZE,
515 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
516 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
517 .num_of_queues = IWLAGN_NUM_QUEUES, 514 .num_of_queues = IWLAGN_NUM_QUEUES,
518 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 515 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
519 .mod_params = &iwlagn_mod_params,
520 .valid_tx_ant = ANT_AB,
521 .valid_rx_ant = ANT_AB,
522 .pll_cfg_val = 0, 516 .pll_cfg_val = 0,
523 .set_l0s = true, 517 .set_l0s = true,
524 .use_bsm = false, 518 .use_bsm = false,
525 .pa_type = IWL_PA_SYSTEM,
526 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 519 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
527 .shadow_ram_support = true, 520 .shadow_ram_support = true,
528 .led_compensation = 51, 521 .led_compensation = 51,
@@ -532,11 +525,76 @@ struct iwl_cfg iwl6000g2a_2bg_cfg = {
532 .support_ct_kill_exit = true, 525 .support_ct_kill_exit = true,
533 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 526 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
534 .chain_noise_scale = 1000, 527 .chain_noise_scale = 1000,
535 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 528 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
536 .max_event_log_size = 512, 529 .max_event_log_size = 512,
530 .ucode_tracing = true,
537 .sensitivity_calib_by_driver = true, 531 .sensitivity_calib_by_driver = true,
538 .chain_noise_calib_by_driver = true, 532 .chain_noise_calib_by_driver = true,
533};
534
535static struct iwl_ht_params iwl6000_ht_params = {
536 .ht_greenfield_support = true,
537 .use_rts_for_aggregation = true, /* use rts/cts protection */
538};
539
540static struct iwl_bt_params iwl6000_bt_params = {
541 .bt_statistics = true,
542 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
543 .advanced_bt_coexist = true,
544 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
545 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
546};
547
548struct iwl_cfg iwl6000g2a_2agn_cfg = {
549 .name = "6000 Series 2x2 AGN Gen2a",
550 .fw_name_pre = IWL6000G2A_FW_PRE,
551 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
552 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
553 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
554 .valid_tx_ant = ANT_AB,
555 .valid_rx_ant = ANT_AB,
556 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
557 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
558 .ops = &iwl6000_ops,
559 .mod_params = &iwlagn_mod_params,
560 .base_params = &iwl6000_base_params,
561 .ht_params = &iwl6000_ht_params,
562 .need_dc_calib = true,
563 .need_temp_offset_calib = true,
564};
565
566struct iwl_cfg iwl6000g2a_2abg_cfg = {
567 .name = "6000 Series 2x2 ABG Gen2a",
568 .fw_name_pre = IWL6000G2A_FW_PRE,
569 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
570 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
571 .sku = IWL_SKU_A|IWL_SKU_G,
572 .valid_tx_ant = ANT_AB,
573 .valid_rx_ant = ANT_AB,
574 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
575 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
576 .ops = &iwl6000_ops,
577 .mod_params = &iwlagn_mod_params,
578 .base_params = &iwl6000_base_params,
539 .need_dc_calib = true, 579 .need_dc_calib = true,
580 .need_temp_offset_calib = true,
581};
582
583struct iwl_cfg iwl6000g2a_2bg_cfg = {
584 .name = "6000 Series 2x2 BG Gen2a",
585 .fw_name_pre = IWL6000G2A_FW_PRE,
586 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
587 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
588 .sku = IWL_SKU_G,
589 .valid_tx_ant = ANT_AB,
590 .valid_rx_ant = ANT_AB,
591 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
592 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
593 .ops = &iwl6000_ops,
594 .mod_params = &iwlagn_mod_params,
595 .base_params = &iwl6000_base_params,
596 .need_dc_calib = true,
597 .need_temp_offset_calib = true,
540}; 598};
541 599
542struct iwl_cfg iwl6000g2b_2agn_cfg = { 600struct iwl_cfg iwl6000g2b_2agn_cfg = {
@@ -545,41 +603,19 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
545 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 603 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
546 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 604 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
547 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 605 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
548 .ops = &iwl6000g2b_ops, 606 .valid_tx_ant = ANT_AB,
549 .eeprom_size = OTP_LOW_IMAGE_SIZE, 607 .valid_rx_ant = ANT_AB,
550 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 608 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
551 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 609 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
552 .num_of_queues = IWLAGN_NUM_QUEUES, 610 .ops = &iwl6000g2b_ops,
553 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
554 .mod_params = &iwlagn_mod_params, 611 .mod_params = &iwlagn_mod_params,
555 .valid_tx_ant = ANT_AB, 612 .base_params = &iwl6000_coex_base_params,
556 .valid_rx_ant = ANT_AB, 613 .bt_params = &iwl6000_bt_params,
557 .pll_cfg_val = 0, 614 .ht_params = &iwl6000_ht_params,
558 .set_l0s = true,
559 .use_bsm = false,
560 .pa_type = IWL_PA_SYSTEM,
561 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
562 .shadow_ram_support = true,
563 .ht_greenfield_support = true,
564 .led_compensation = 51,
565 .use_rts_for_aggregation = true, /* use rts/cts protection */
566 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
567 .supports_idle = true,
568 .adv_thermal_throttle = true,
569 .support_ct_kill_exit = true,
570 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
571 .chain_noise_scale = 1000,
572 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
573 .max_event_log_size = 512,
574 .sensitivity_calib_by_driver = true,
575 .chain_noise_calib_by_driver = true,
576 .need_dc_calib = true, 615 .need_dc_calib = true,
577 .bt_statistics = true, 616 .need_temp_offset_calib = true,
578 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 617 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
579 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 618 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
580 .advanced_bt_coexist = true,
581 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
582 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
583}; 619};
584 620
585struct iwl_cfg iwl6000g2b_2abg_cfg = { 621struct iwl_cfg iwl6000g2b_2abg_cfg = {
@@ -588,39 +624,18 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
588 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 624 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
589 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 625 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
590 .sku = IWL_SKU_A|IWL_SKU_G, 626 .sku = IWL_SKU_A|IWL_SKU_G,
591 .ops = &iwl6000g2b_ops, 627 .valid_tx_ant = ANT_AB,
592 .eeprom_size = OTP_LOW_IMAGE_SIZE, 628 .valid_rx_ant = ANT_AB,
593 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 629 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
594 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 630 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
595 .num_of_queues = IWLAGN_NUM_QUEUES, 631 .ops = &iwl6000g2b_ops,
596 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
597 .mod_params = &iwlagn_mod_params, 632 .mod_params = &iwlagn_mod_params,
598 .valid_tx_ant = ANT_AB, 633 .base_params = &iwl6000_coex_base_params,
599 .valid_rx_ant = ANT_AB, 634 .bt_params = &iwl6000_bt_params,
600 .pll_cfg_val = 0,
601 .set_l0s = true,
602 .use_bsm = false,
603 .pa_type = IWL_PA_SYSTEM,
604 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
605 .shadow_ram_support = true,
606 .led_compensation = 51,
607 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
608 .supports_idle = true,
609 .adv_thermal_throttle = true,
610 .support_ct_kill_exit = true,
611 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
612 .chain_noise_scale = 1000,
613 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
614 .max_event_log_size = 512,
615 .sensitivity_calib_by_driver = true,
616 .chain_noise_calib_by_driver = true,
617 .need_dc_calib = true, 635 .need_dc_calib = true,
618 .bt_statistics = true, 636 .need_temp_offset_calib = true,
619 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 637 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
620 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 638 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
621 .advanced_bt_coexist = true,
622 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
623 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
624}; 639};
625 640
626struct iwl_cfg iwl6000g2b_2bgn_cfg = { 641struct iwl_cfg iwl6000g2b_2bgn_cfg = {
@@ -629,41 +644,19 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
629 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 644 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
630 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 645 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
631 .sku = IWL_SKU_G|IWL_SKU_N, 646 .sku = IWL_SKU_G|IWL_SKU_N,
632 .ops = &iwl6000g2b_ops, 647 .valid_tx_ant = ANT_AB,
633 .eeprom_size = OTP_LOW_IMAGE_SIZE, 648 .valid_rx_ant = ANT_AB,
634 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 649 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
635 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 650 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
636 .num_of_queues = IWLAGN_NUM_QUEUES, 651 .ops = &iwl6000g2b_ops,
637 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
638 .mod_params = &iwlagn_mod_params, 652 .mod_params = &iwlagn_mod_params,
639 .valid_tx_ant = ANT_AB, 653 .base_params = &iwl6000_coex_base_params,
640 .valid_rx_ant = ANT_AB, 654 .bt_params = &iwl6000_bt_params,
641 .pll_cfg_val = 0, 655 .ht_params = &iwl6000_ht_params,
642 .set_l0s = true,
643 .use_bsm = false,
644 .pa_type = IWL_PA_SYSTEM,
645 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
646 .shadow_ram_support = true,
647 .ht_greenfield_support = true,
648 .led_compensation = 51,
649 .use_rts_for_aggregation = true, /* use rts/cts protection */
650 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
651 .supports_idle = true,
652 .adv_thermal_throttle = true,
653 .support_ct_kill_exit = true,
654 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
655 .chain_noise_scale = 1000,
656 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
657 .max_event_log_size = 512,
658 .sensitivity_calib_by_driver = true,
659 .chain_noise_calib_by_driver = true,
660 .need_dc_calib = true, 656 .need_dc_calib = true,
661 .bt_statistics = true, 657 .need_temp_offset_calib = true,
662 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 658 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
663 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 659 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
664 .advanced_bt_coexist = true,
665 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
666 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
667}; 660};
668 661
669struct iwl_cfg iwl6000g2b_2bg_cfg = { 662struct iwl_cfg iwl6000g2b_2bg_cfg = {
@@ -672,39 +665,18 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
672 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 665 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
673 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 666 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
674 .sku = IWL_SKU_G, 667 .sku = IWL_SKU_G,
675 .ops = &iwl6000g2b_ops, 668 .valid_tx_ant = ANT_AB,
676 .eeprom_size = OTP_LOW_IMAGE_SIZE, 669 .valid_rx_ant = ANT_AB,
677 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 670 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
678 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 671 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
679 .num_of_queues = IWLAGN_NUM_QUEUES, 672 .ops = &iwl6000g2b_ops,
680 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
681 .mod_params = &iwlagn_mod_params, 673 .mod_params = &iwlagn_mod_params,
682 .valid_tx_ant = ANT_AB, 674 .base_params = &iwl6000_coex_base_params,
683 .valid_rx_ant = ANT_AB, 675 .bt_params = &iwl6000_bt_params,
684 .pll_cfg_val = 0,
685 .set_l0s = true,
686 .use_bsm = false,
687 .pa_type = IWL_PA_SYSTEM,
688 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
689 .shadow_ram_support = true,
690 .led_compensation = 51,
691 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
692 .supports_idle = true,
693 .adv_thermal_throttle = true,
694 .support_ct_kill_exit = true,
695 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
696 .chain_noise_scale = 1000,
697 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
698 .max_event_log_size = 512,
699 .sensitivity_calib_by_driver = true,
700 .chain_noise_calib_by_driver = true,
701 .need_dc_calib = true, 676 .need_dc_calib = true,
702 .bt_statistics = true, 677 .need_temp_offset_calib = true,
703 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 678 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
704 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 679 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
705 .advanced_bt_coexist = true,
706 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
707 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
708}; 680};
709 681
710struct iwl_cfg iwl6000g2b_bgn_cfg = { 682struct iwl_cfg iwl6000g2b_bgn_cfg = {
@@ -713,41 +685,19 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
713 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 685 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
714 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 686 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
715 .sku = IWL_SKU_G|IWL_SKU_N, 687 .sku = IWL_SKU_G|IWL_SKU_N,
716 .ops = &iwl6000g2b_ops, 688 .valid_tx_ant = ANT_A,
717 .eeprom_size = OTP_LOW_IMAGE_SIZE, 689 .valid_rx_ant = ANT_AB,
718 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 690 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
719 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 691 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
720 .num_of_queues = IWLAGN_NUM_QUEUES, 692 .ops = &iwl6000g2b_ops,
721 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
722 .mod_params = &iwlagn_mod_params, 693 .mod_params = &iwlagn_mod_params,
723 .valid_tx_ant = ANT_A, 694 .base_params = &iwl6000_coex_base_params,
724 .valid_rx_ant = ANT_AB, 695 .bt_params = &iwl6000_bt_params,
725 .pll_cfg_val = 0, 696 .ht_params = &iwl6000_ht_params,
726 .set_l0s = true,
727 .use_bsm = false,
728 .pa_type = IWL_PA_SYSTEM,
729 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
730 .shadow_ram_support = true,
731 .ht_greenfield_support = true,
732 .led_compensation = 51,
733 .use_rts_for_aggregation = true, /* use rts/cts protection */
734 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
735 .supports_idle = true,
736 .adv_thermal_throttle = true,
737 .support_ct_kill_exit = true,
738 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
739 .chain_noise_scale = 1000,
740 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
741 .max_event_log_size = 512,
742 .sensitivity_calib_by_driver = true,
743 .chain_noise_calib_by_driver = true,
744 .need_dc_calib = true, 697 .need_dc_calib = true,
745 .bt_statistics = true, 698 .need_temp_offset_calib = true,
746 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 699 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
747 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 700 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
748 .advanced_bt_coexist = true,
749 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
750 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
751}; 701};
752 702
753struct iwl_cfg iwl6000g2b_bg_cfg = { 703struct iwl_cfg iwl6000g2b_bg_cfg = {
@@ -756,39 +706,18 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
756 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 706 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
757 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 707 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
758 .sku = IWL_SKU_G, 708 .sku = IWL_SKU_G,
759 .ops = &iwl6000g2b_ops, 709 .valid_tx_ant = ANT_A,
760 .eeprom_size = OTP_LOW_IMAGE_SIZE, 710 .valid_rx_ant = ANT_AB,
761 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 711 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
762 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 712 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
763 .num_of_queues = IWLAGN_NUM_QUEUES, 713 .ops = &iwl6000g2b_ops,
764 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
765 .mod_params = &iwlagn_mod_params, 714 .mod_params = &iwlagn_mod_params,
766 .valid_tx_ant = ANT_A, 715 .base_params = &iwl6000_coex_base_params,
767 .valid_rx_ant = ANT_AB, 716 .bt_params = &iwl6000_bt_params,
768 .pll_cfg_val = 0,
769 .set_l0s = true,
770 .use_bsm = false,
771 .pa_type = IWL_PA_SYSTEM,
772 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
773 .shadow_ram_support = true,
774 .led_compensation = 51,
775 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
776 .supports_idle = true,
777 .adv_thermal_throttle = true,
778 .support_ct_kill_exit = true,
779 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
780 .chain_noise_scale = 1000,
781 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
782 .max_event_log_size = 512,
783 .sensitivity_calib_by_driver = true,
784 .chain_noise_calib_by_driver = true,
785 .need_dc_calib = true, 717 .need_dc_calib = true,
786 .bt_statistics = true, 718 .need_temp_offset_calib = true,
787 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 719 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
788 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 720 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
789 .advanced_bt_coexist = true,
790 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
791 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
792}; 721};
793 722
794/* 723/*
@@ -800,35 +729,15 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
800 .ucode_api_max = IWL6000_UCODE_API_MAX, 729 .ucode_api_max = IWL6000_UCODE_API_MAX,
801 .ucode_api_min = IWL6000_UCODE_API_MIN, 730 .ucode_api_min = IWL6000_UCODE_API_MIN,
802 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 731 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
803 .ops = &iwl6000_ops, 732 .valid_tx_ant = ANT_BC,
804 .eeprom_size = OTP_LOW_IMAGE_SIZE, 733 .valid_rx_ant = ANT_BC,
805 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 734 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
806 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 735 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
807 .num_of_queues = IWLAGN_NUM_QUEUES, 736 .ops = &iwl6000_ops,
808 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
809 .mod_params = &iwlagn_mod_params, 737 .mod_params = &iwlagn_mod_params,
810 .valid_tx_ant = ANT_BC, 738 .base_params = &iwl6000_base_params,
811 .valid_rx_ant = ANT_BC, 739 .ht_params = &iwl6000_ht_params,
812 .pll_cfg_val = 0,
813 .set_l0s = true,
814 .use_bsm = false,
815 .pa_type = IWL_PA_INTERNAL, 740 .pa_type = IWL_PA_INTERNAL,
816 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
817 .shadow_ram_support = true,
818 .ht_greenfield_support = true,
819 .led_compensation = 51,
820 .use_rts_for_aggregation = true, /* use rts/cts protection */
821 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
822 .supports_idle = true,
823 .adv_thermal_throttle = true,
824 .support_ct_kill_exit = true,
825 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
826 .chain_noise_scale = 1000,
827 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
828 .max_event_log_size = 1024,
829 .ucode_tracing = true,
830 .sensitivity_calib_by_driver = true,
831 .chain_noise_calib_by_driver = true,
832}; 741};
833 742
834struct iwl_cfg iwl6000i_2abg_cfg = { 743struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -837,33 +746,14 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
837 .ucode_api_max = IWL6000_UCODE_API_MAX, 746 .ucode_api_max = IWL6000_UCODE_API_MAX,
838 .ucode_api_min = IWL6000_UCODE_API_MIN, 747 .ucode_api_min = IWL6000_UCODE_API_MIN,
839 .sku = IWL_SKU_A|IWL_SKU_G, 748 .sku = IWL_SKU_A|IWL_SKU_G,
840 .ops = &iwl6000_ops, 749 .valid_tx_ant = ANT_BC,
841 .eeprom_size = OTP_LOW_IMAGE_SIZE, 750 .valid_rx_ant = ANT_BC,
842 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 751 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
843 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 752 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
844 .num_of_queues = IWLAGN_NUM_QUEUES, 753 .ops = &iwl6000_ops,
845 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
846 .mod_params = &iwlagn_mod_params, 754 .mod_params = &iwlagn_mod_params,
847 .valid_tx_ant = ANT_BC, 755 .base_params = &iwl6000_base_params,
848 .valid_rx_ant = ANT_BC,
849 .pll_cfg_val = 0,
850 .set_l0s = true,
851 .use_bsm = false,
852 .pa_type = IWL_PA_INTERNAL, 756 .pa_type = IWL_PA_INTERNAL,
853 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
854 .shadow_ram_support = true,
855 .led_compensation = 51,
856 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
857 .supports_idle = true,
858 .adv_thermal_throttle = true,
859 .support_ct_kill_exit = true,
860 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
861 .chain_noise_scale = 1000,
862 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
863 .max_event_log_size = 1024,
864 .ucode_tracing = true,
865 .sensitivity_calib_by_driver = true,
866 .chain_noise_calib_by_driver = true,
867}; 757};
868 758
869struct iwl_cfg iwl6000i_2bg_cfg = { 759struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -872,33 +762,14 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
872 .ucode_api_max = IWL6000_UCODE_API_MAX, 762 .ucode_api_max = IWL6000_UCODE_API_MAX,
873 .ucode_api_min = IWL6000_UCODE_API_MIN, 763 .ucode_api_min = IWL6000_UCODE_API_MIN,
874 .sku = IWL_SKU_G, 764 .sku = IWL_SKU_G,
875 .ops = &iwl6000_ops, 765 .valid_tx_ant = ANT_BC,
876 .eeprom_size = OTP_LOW_IMAGE_SIZE, 766 .valid_rx_ant = ANT_BC,
877 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 767 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
878 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 768 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
879 .num_of_queues = IWLAGN_NUM_QUEUES, 769 .ops = &iwl6000_ops,
880 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
881 .mod_params = &iwlagn_mod_params, 770 .mod_params = &iwlagn_mod_params,
882 .valid_tx_ant = ANT_BC, 771 .base_params = &iwl6000_base_params,
883 .valid_rx_ant = ANT_BC,
884 .pll_cfg_val = 0,
885 .set_l0s = true,
886 .use_bsm = false,
887 .pa_type = IWL_PA_INTERNAL, 772 .pa_type = IWL_PA_INTERNAL,
888 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
889 .shadow_ram_support = true,
890 .led_compensation = 51,
891 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
892 .supports_idle = true,
893 .adv_thermal_throttle = true,
894 .support_ct_kill_exit = true,
895 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
896 .chain_noise_scale = 1000,
897 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
898 .max_event_log_size = 1024,
899 .ucode_tracing = true,
900 .sensitivity_calib_by_driver = true,
901 .chain_noise_calib_by_driver = true,
902}; 773};
903 774
904struct iwl_cfg iwl6050_2agn_cfg = { 775struct iwl_cfg iwl6050_2agn_cfg = {
@@ -907,35 +778,14 @@ struct iwl_cfg iwl6050_2agn_cfg = {
907 .ucode_api_max = IWL6050_UCODE_API_MAX, 778 .ucode_api_max = IWL6050_UCODE_API_MAX,
908 .ucode_api_min = IWL6050_UCODE_API_MIN, 779 .ucode_api_min = IWL6050_UCODE_API_MIN,
909 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 780 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
910 .ops = &iwl6000_ops, 781 .valid_tx_ant = ANT_AB,
911 .eeprom_size = OTP_LOW_IMAGE_SIZE, 782 .valid_rx_ant = ANT_AB,
783 .ops = &iwl6050_ops,
912 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 784 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
913 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, 785 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
914 .num_of_queues = IWLAGN_NUM_QUEUES,
915 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
916 .mod_params = &iwlagn_mod_params, 786 .mod_params = &iwlagn_mod_params,
917 .valid_tx_ant = ANT_AB, 787 .base_params = &iwl6050_base_params,
918 .valid_rx_ant = ANT_AB, 788 .ht_params = &iwl6000_ht_params,
919 .pll_cfg_val = 0,
920 .set_l0s = true,
921 .use_bsm = false,
922 .pa_type = IWL_PA_SYSTEM,
923 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
924 .shadow_ram_support = true,
925 .ht_greenfield_support = true,
926 .led_compensation = 51,
927 .use_rts_for_aggregation = true, /* use rts/cts protection */
928 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
929 .supports_idle = true,
930 .adv_thermal_throttle = true,
931 .support_ct_kill_exit = true,
932 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
933 .chain_noise_scale = 1500,
934 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
935 .max_event_log_size = 1024,
936 .ucode_tracing = true,
937 .sensitivity_calib_by_driver = true,
938 .chain_noise_calib_by_driver = true,
939 .need_dc_calib = true, 789 .need_dc_calib = true,
940}; 790};
941 791
@@ -945,35 +795,14 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
945 .ucode_api_max = IWL6050_UCODE_API_MAX, 795 .ucode_api_max = IWL6050_UCODE_API_MAX,
946 .ucode_api_min = IWL6050_UCODE_API_MIN, 796 .ucode_api_min = IWL6050_UCODE_API_MIN,
947 .sku = IWL_SKU_G|IWL_SKU_N, 797 .sku = IWL_SKU_G|IWL_SKU_N,
948 .ops = &iwl6000_ops, 798 .valid_tx_ant = ANT_A,
949 .eeprom_size = OTP_LOW_IMAGE_SIZE, 799 .valid_rx_ant = ANT_AB,
950 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, 800 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
951 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION, 801 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
952 .num_of_queues = IWLAGN_NUM_QUEUES, 802 .ops = &iwl6050g2_ops,
953 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
954 .mod_params = &iwlagn_mod_params, 803 .mod_params = &iwlagn_mod_params,
955 .valid_tx_ant = ANT_A, 804 .base_params = &iwl6050_base_params,
956 .valid_rx_ant = ANT_AB, 805 .ht_params = &iwl6000_ht_params,
957 .pll_cfg_val = 0,
958 .set_l0s = true,
959 .use_bsm = false,
960 .pa_type = IWL_PA_SYSTEM,
961 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
962 .shadow_ram_support = true,
963 .ht_greenfield_support = true,
964 .led_compensation = 51,
965 .use_rts_for_aggregation = true, /* use rts/cts protection */
966 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
967 .supports_idle = true,
968 .adv_thermal_throttle = true,
969 .support_ct_kill_exit = true,
970 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
971 .chain_noise_scale = 1500,
972 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
973 .max_event_log_size = 1024,
974 .ucode_tracing = true,
975 .sensitivity_calib_by_driver = true,
976 .chain_noise_calib_by_driver = true,
977 .need_dc_calib = true, 806 .need_dc_calib = true,
978}; 807};
979 808
@@ -983,33 +812,13 @@ struct iwl_cfg iwl6050_2abg_cfg = {
983 .ucode_api_max = IWL6050_UCODE_API_MAX, 812 .ucode_api_max = IWL6050_UCODE_API_MAX,
984 .ucode_api_min = IWL6050_UCODE_API_MIN, 813 .ucode_api_min = IWL6050_UCODE_API_MIN,
985 .sku = IWL_SKU_A|IWL_SKU_G, 814 .sku = IWL_SKU_A|IWL_SKU_G,
986 .ops = &iwl6000_ops, 815 .valid_tx_ant = ANT_AB,
987 .eeprom_size = OTP_LOW_IMAGE_SIZE, 816 .valid_rx_ant = ANT_AB,
988 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 817 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
989 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, 818 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
990 .num_of_queues = IWLAGN_NUM_QUEUES, 819 .ops = &iwl6050_ops,
991 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
992 .mod_params = &iwlagn_mod_params, 820 .mod_params = &iwlagn_mod_params,
993 .valid_tx_ant = ANT_AB, 821 .base_params = &iwl6050_base_params,
994 .valid_rx_ant = ANT_AB,
995 .pll_cfg_val = 0,
996 .set_l0s = true,
997 .use_bsm = false,
998 .pa_type = IWL_PA_SYSTEM,
999 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
1000 .shadow_ram_support = true,
1001 .led_compensation = 51,
1002 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1003 .supports_idle = true,
1004 .adv_thermal_throttle = true,
1005 .support_ct_kill_exit = true,
1006 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
1007 .chain_noise_scale = 1500,
1008 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD,
1009 .max_event_log_size = 1024,
1010 .ucode_tracing = true,
1011 .sensitivity_calib_by_driver = true,
1012 .chain_noise_calib_by_driver = true,
1013 .need_dc_calib = true, 822 .need_dc_calib = true,
1014}; 823};
1015 824
@@ -1019,38 +828,58 @@ struct iwl_cfg iwl6000_3agn_cfg = {
1019 .ucode_api_max = IWL6000_UCODE_API_MAX, 828 .ucode_api_max = IWL6000_UCODE_API_MAX,
1020 .ucode_api_min = IWL6000_UCODE_API_MIN, 829 .ucode_api_min = IWL6000_UCODE_API_MIN,
1021 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 830 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1022 .ops = &iwl6000_ops, 831 .valid_tx_ant = ANT_ABC,
1023 .eeprom_size = OTP_LOW_IMAGE_SIZE, 832 .valid_rx_ant = ANT_ABC,
1024 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 833 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
1025 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 834 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
1026 .num_of_queues = IWLAGN_NUM_QUEUES, 835 .ops = &iwl6000_ops,
1027 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1028 .mod_params = &iwlagn_mod_params, 836 .mod_params = &iwlagn_mod_params,
1029 .valid_tx_ant = ANT_ABC, 837 .base_params = &iwl6000_base_params,
1030 .valid_rx_ant = ANT_ABC, 838 .ht_params = &iwl6000_ht_params,
1031 .pll_cfg_val = 0, 839 .need_dc_calib = true,
1032 .set_l0s = true, 840};
1033 .use_bsm = false, 841
1034 .pa_type = IWL_PA_SYSTEM, 842struct iwl_cfg iwl130_bgn_cfg = {
1035 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 843 .name = "Intel(R) 130 Series 1x1 BGN",
1036 .shadow_ram_support = true, 844 .fw_name_pre = IWL6000G2B_FW_PRE,
1037 .ht_greenfield_support = true, 845 .ucode_api_max = IWL130_UCODE_API_MAX,
1038 .led_compensation = 51, 846 .ucode_api_min = IWL130_UCODE_API_MIN,
1039 .use_rts_for_aggregation = true, /* use rts/cts protection */ 847 .sku = IWL_SKU_G|IWL_SKU_N,
1040 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 848 .valid_tx_ant = ANT_A,
1041 .supports_idle = true, 849 .valid_rx_ant = ANT_A,
1042 .adv_thermal_throttle = true, 850 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
1043 .support_ct_kill_exit = true, 851 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
1044 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 852 .ops = &iwl6000g2b_ops,
1045 .chain_noise_scale = 1000, 853 .mod_params = &iwlagn_mod_params,
1046 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 854 .base_params = &iwl6000_coex_base_params,
1047 .max_event_log_size = 1024, 855 .bt_params = &iwl6000_bt_params,
1048 .ucode_tracing = true, 856 .ht_params = &iwl6000_ht_params,
1049 .sensitivity_calib_by_driver = true, 857 .need_dc_calib = true,
1050 .chain_noise_calib_by_driver = true, 858 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
859 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
860};
861
862struct iwl_cfg iwl130_bg_cfg = {
863 .name = "Intel(R) 130 Series 1x2 BG",
864 .fw_name_pre = IWL6000G2B_FW_PRE,
865 .ucode_api_max = IWL130_UCODE_API_MAX,
866 .ucode_api_min = IWL130_UCODE_API_MIN,
867 .sku = IWL_SKU_G,
868 .valid_tx_ant = ANT_A,
869 .valid_rx_ant = ANT_A,
870 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
871 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
872 .ops = &iwl6000g2b_ops,
873 .mod_params = &iwlagn_mod_params,
874 .base_params = &iwl6000_coex_base_params,
875 .bt_params = &iwl6000_bt_params,
876 .need_dc_calib = true,
877 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
878 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
1051}; 879};
1052 880
1053MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 881MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
1054MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 882MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
1055MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 883MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
1056MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 884MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
885MODULE_FIRMWARE(IWL130_MODULE_FIRMWARE(IWL130_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 84ad62958535..e2019e756936 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -65,7 +65,7 @@
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67#include "iwl-core.h" 67#include "iwl-core.h"
68#include "iwl-calib.h" 68#include "iwl-agn-calib.h"
69 69
70/***************************************************************************** 70/*****************************************************************************
71 * INIT calibrations framework 71 * INIT calibrations framework
@@ -631,7 +631,8 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
631 } 631 }
632 632
633 spin_lock_irqsave(&priv->lock, flags); 633 spin_lock_irqsave(&priv->lock, flags);
634 if (priv->cfg->bt_statistics) { 634 if (priv->cfg->bt_params &&
635 priv->cfg->bt_params->bt_statistics) {
635 rx_info = &(((struct iwl_bt_notif_statistics *)resp)-> 636 rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
636 rx.general.common); 637 rx.general.common);
637 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm); 638 ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -786,7 +787,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
786 } 787 }
787 788
788 spin_lock_irqsave(&priv->lock, flags); 789 spin_lock_irqsave(&priv->lock, flags);
789 if (priv->cfg->bt_statistics) { 790 if (priv->cfg->bt_params &&
791 priv->cfg->bt_params->bt_statistics) {
790 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)-> 792 rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
791 rx.general.common); 793 rx.general.common);
792 } else { 794 } else {
@@ -801,7 +803,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
801 803
802 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 804 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
803 rxon_chnum = le16_to_cpu(ctx->staging.channel); 805 rxon_chnum = le16_to_cpu(ctx->staging.channel);
804 if (priv->cfg->bt_statistics) { 806 if (priv->cfg->bt_params &&
807 priv->cfg->bt_params->bt_statistics) {
805 stat_band24 = !!(((struct iwl_bt_notif_statistics *) 808 stat_band24 = !!(((struct iwl_bt_notif_statistics *)
806 stat_resp)->flag & 809 stat_resp)->flag &
807 STATISTICS_REPLY_FLG_BAND_24G_MSK); 810 STATISTICS_REPLY_FLG_BAND_24G_MSK);
@@ -861,16 +864,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
861 /* If this is the "chain_noise_num_beacons", determine: 864 /* If this is the "chain_noise_num_beacons", determine:
862 * 1) Disconnected antennas (using signal strengths) 865 * 1) Disconnected antennas (using signal strengths)
863 * 2) Differential gain (using silence noise) to balance receivers */ 866 * 2) Differential gain (using silence noise) to balance receivers */
864 if (data->beacon_count != priv->cfg->chain_noise_num_beacons) 867 if (data->beacon_count !=
868 priv->cfg->base_params->chain_noise_num_beacons)
865 return; 869 return;
866 870
867 /* Analyze signal for disconnected antenna */ 871 /* Analyze signal for disconnected antenna */
868 average_sig[0] = 872 average_sig[0] = data->chain_signal_a /
869 (data->chain_signal_a) / priv->cfg->chain_noise_num_beacons; 873 priv->cfg->base_params->chain_noise_num_beacons;
870 average_sig[1] = 874 average_sig[1] = data->chain_signal_b /
871 (data->chain_signal_b) / priv->cfg->chain_noise_num_beacons; 875 priv->cfg->base_params->chain_noise_num_beacons;
872 average_sig[2] = 876 average_sig[2] = data->chain_signal_c /
873 (data->chain_signal_c) / priv->cfg->chain_noise_num_beacons; 877 priv->cfg->base_params->chain_noise_num_beacons;
874 878
875 if (average_sig[0] >= average_sig[1]) { 879 if (average_sig[0] >= average_sig[1]) {
876 max_average_sig = average_sig[0]; 880 max_average_sig = average_sig[0];
@@ -920,7 +924,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
920 * To be safe, simply mask out any chains that we know 924 * To be safe, simply mask out any chains that we know
921 * are not on the device. 925 * are not on the device.
922 */ 926 */
923 if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) { 927 if (priv->cfg->bt_params &&
928 priv->cfg->bt_params->advanced_bt_coexist &&
929 priv->bt_full_concurrent) {
924 /* operated as 1x1 in full concurrency mode */ 930 /* operated as 1x1 in full concurrency mode */
925 active_chains &= first_antenna(priv->hw_params.valid_rx_ant); 931 active_chains &= first_antenna(priv->hw_params.valid_rx_ant);
926 } else 932 } else
@@ -967,12 +973,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
967 active_chains); 973 active_chains);
968 974
969 /* Analyze noise for rx balance */ 975 /* Analyze noise for rx balance */
970 average_noise[0] = 976 average_noise[0] = data->chain_noise_a /
971 ((data->chain_noise_a) / priv->cfg->chain_noise_num_beacons); 977 priv->cfg->base_params->chain_noise_num_beacons;
972 average_noise[1] = 978 average_noise[1] = data->chain_noise_b /
973 ((data->chain_noise_b) / priv->cfg->chain_noise_num_beacons); 979 priv->cfg->base_params->chain_noise_num_beacons;
974 average_noise[2] = 980 average_noise[2] = data->chain_noise_c /
975 ((data->chain_noise_c) / priv->cfg->chain_noise_num_beacons); 981 priv->cfg->base_params->chain_noise_num_beacons;
976 982
977 for (i = 0; i < NUM_RX_CHAINS; i++) { 983 for (i = 0; i < NUM_RX_CHAINS; i++) {
978 if (!(data->disconn_array[i]) && 984 if (!(data->disconn_array[i]) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index ba9523fbb300..e37ae7261630 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -79,4 +79,8 @@ static inline void iwl_chain_noise_reset(struct iwl_priv *priv)
79 priv->cfg->ops->utils->chain_noise_reset(priv); 79 priv->cfg->ops->utils->chain_noise_reset(priv);
80} 80}
81 81
82int iwl_send_calib_results(struct iwl_priv *priv);
83int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
84void iwl_calib_free_results(struct iwl_priv *priv);
85
82#endif /* __iwl_calib_h__ */ 86#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index d706b8afbe5a..a358d4334a1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -25,15 +25,22 @@
25* Intel Linux Wireless <ilw@linux.intel.com> 25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/ 27*****************************************************************************/
28 28#include "iwl-agn.h"
29#include "iwl-agn-debugfs.h" 29#include "iwl-agn-debugfs.h"
30 30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_hex = " %-30s 0x%02X\n";
33static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
34static const char *fmt_header =
35 "%-32s current cumulative delta max\n";
36
31static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) 37static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
32{ 38{
33 int p = 0; 39 int p = 0;
34 u32 flag; 40 u32 flag;
35 41
36 if (priv->cfg->bt_statistics) 42 if (priv->cfg->bt_params &&
43 priv->cfg->bt_params->bt_statistics)
37 flag = le32_to_cpu(priv->_agn.statistics_bt.flag); 44 flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
38 else 45 else
39 flag = le32_to_cpu(priv->_agn.statistics.flag); 46 flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -82,7 +89,8 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
82 * the last statistics notification from uCode 89 * the last statistics notification from uCode
83 * might not reflect the current uCode activity 90 * might not reflect the current uCode activity
84 */ 91 */
85 if (priv->cfg->bt_statistics) { 92 if (priv->cfg->bt_params &&
93 priv->cfg->bt_params->bt_statistics) {
86 ofdm = &priv->_agn.statistics_bt.rx.ofdm; 94 ofdm = &priv->_agn.statistics_bt.rx.ofdm;
87 cck = &priv->_agn.statistics_bt.rx.cck; 95 cck = &priv->_agn.statistics_bt.rx.cck;
88 general = &priv->_agn.statistics_bt.rx.general.common; 96 general = &priv->_agn.statistics_bt.rx.general.common;
@@ -121,436 +129,380 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
121 } 129 }
122 130
123 pos += iwl_statistics_flag(priv, buf, bufsz); 131 pos += iwl_statistics_flag(priv, buf, bufsz);
124 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
125 "acumulative delta max\n",
126 "Statistics_Rx - OFDM:");
127 pos += scnprintf(buf + pos, bufsz - pos, 132 pos += scnprintf(buf + pos, bufsz - pos,
128 " %-30s %10u %10u %10u %10u\n", 133 fmt_header, "Statistics_Rx - OFDM:");
129 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt), 134 pos += scnprintf(buf + pos, bufsz - pos,
135 fmt_table, "ina_cnt:",
136 le32_to_cpu(ofdm->ina_cnt),
130 accum_ofdm->ina_cnt, 137 accum_ofdm->ina_cnt,
131 delta_ofdm->ina_cnt, max_ofdm->ina_cnt); 138 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
132 pos += scnprintf(buf + pos, bufsz - pos, 139 pos += scnprintf(buf + pos, bufsz - pos,
133 " %-30s %10u %10u %10u %10u\n", 140 fmt_table, "fina_cnt:",
134 "fina_cnt:",
135 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, 141 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
136 delta_ofdm->fina_cnt, max_ofdm->fina_cnt); 142 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
137 pos += scnprintf(buf + pos, bufsz - pos, 143 pos += scnprintf(buf + pos, bufsz - pos,
138 " %-30s %10u %10u %10u %10u\n", 144 fmt_table, "plcp_err:",
139 "plcp_err:",
140 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, 145 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
141 delta_ofdm->plcp_err, max_ofdm->plcp_err); 146 delta_ofdm->plcp_err, max_ofdm->plcp_err);
142 pos += scnprintf(buf + pos, bufsz - pos, 147 pos += scnprintf(buf + pos, bufsz - pos,
143 " %-30s %10u %10u %10u %10u\n", "crc32_err:", 148 fmt_table, "crc32_err:",
144 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, 149 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
145 delta_ofdm->crc32_err, max_ofdm->crc32_err); 150 delta_ofdm->crc32_err, max_ofdm->crc32_err);
146 pos += scnprintf(buf + pos, bufsz - pos, 151 pos += scnprintf(buf + pos, bufsz - pos,
147 " %-30s %10u %10u %10u %10u\n", "overrun_err:", 152 fmt_table, "overrun_err:",
148 le32_to_cpu(ofdm->overrun_err), 153 le32_to_cpu(ofdm->overrun_err),
149 accum_ofdm->overrun_err, delta_ofdm->overrun_err, 154 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
150 max_ofdm->overrun_err); 155 max_ofdm->overrun_err);
151 pos += scnprintf(buf + pos, bufsz - pos, 156 pos += scnprintf(buf + pos, bufsz - pos,
152 " %-30s %10u %10u %10u %10u\n", 157 fmt_table, "early_overrun_err:",
153 "early_overrun_err:",
154 le32_to_cpu(ofdm->early_overrun_err), 158 le32_to_cpu(ofdm->early_overrun_err),
155 accum_ofdm->early_overrun_err, 159 accum_ofdm->early_overrun_err,
156 delta_ofdm->early_overrun_err, 160 delta_ofdm->early_overrun_err,
157 max_ofdm->early_overrun_err); 161 max_ofdm->early_overrun_err);
158 pos += scnprintf(buf + pos, bufsz - pos, 162 pos += scnprintf(buf + pos, bufsz - pos,
159 " %-30s %10u %10u %10u %10u\n", 163 fmt_table, "crc32_good:",
160 "crc32_good:", le32_to_cpu(ofdm->crc32_good), 164 le32_to_cpu(ofdm->crc32_good),
161 accum_ofdm->crc32_good, delta_ofdm->crc32_good, 165 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
162 max_ofdm->crc32_good); 166 max_ofdm->crc32_good);
163 pos += scnprintf(buf + pos, bufsz - pos, 167 pos += scnprintf(buf + pos, bufsz - pos,
164 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:", 168 fmt_table, "false_alarm_cnt:",
165 le32_to_cpu(ofdm->false_alarm_cnt), 169 le32_to_cpu(ofdm->false_alarm_cnt),
166 accum_ofdm->false_alarm_cnt, 170 accum_ofdm->false_alarm_cnt,
167 delta_ofdm->false_alarm_cnt, 171 delta_ofdm->false_alarm_cnt,
168 max_ofdm->false_alarm_cnt); 172 max_ofdm->false_alarm_cnt);
169 pos += scnprintf(buf + pos, bufsz - pos, 173 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n", 174 fmt_table, "fina_sync_err_cnt:",
171 "fina_sync_err_cnt:",
172 le32_to_cpu(ofdm->fina_sync_err_cnt), 175 le32_to_cpu(ofdm->fina_sync_err_cnt),
173 accum_ofdm->fina_sync_err_cnt, 176 accum_ofdm->fina_sync_err_cnt,
174 delta_ofdm->fina_sync_err_cnt, 177 delta_ofdm->fina_sync_err_cnt,
175 max_ofdm->fina_sync_err_cnt); 178 max_ofdm->fina_sync_err_cnt);
176 pos += scnprintf(buf + pos, bufsz - pos, 179 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:", 180 fmt_table, "sfd_timeout:",
178 le32_to_cpu(ofdm->sfd_timeout), 181 le32_to_cpu(ofdm->sfd_timeout),
179 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, 182 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
180 max_ofdm->sfd_timeout); 183 max_ofdm->sfd_timeout);
181 pos += scnprintf(buf + pos, bufsz - pos, 184 pos += scnprintf(buf + pos, bufsz - pos,
182 " %-30s %10u %10u %10u %10u\n", "fina_timeout:", 185 fmt_table, "fina_timeout:",
183 le32_to_cpu(ofdm->fina_timeout), 186 le32_to_cpu(ofdm->fina_timeout),
184 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, 187 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
185 max_ofdm->fina_timeout); 188 max_ofdm->fina_timeout);
186 pos += scnprintf(buf + pos, bufsz - pos, 189 pos += scnprintf(buf + pos, bufsz - pos,
187 " %-30s %10u %10u %10u %10u\n", 190 fmt_table, "unresponded_rts:",
188 "unresponded_rts:",
189 le32_to_cpu(ofdm->unresponded_rts), 191 le32_to_cpu(ofdm->unresponded_rts),
190 accum_ofdm->unresponded_rts, 192 accum_ofdm->unresponded_rts,
191 delta_ofdm->unresponded_rts, 193 delta_ofdm->unresponded_rts,
192 max_ofdm->unresponded_rts); 194 max_ofdm->unresponded_rts);
193 pos += scnprintf(buf + pos, bufsz - pos, 195 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n", 196 fmt_table, "rxe_frame_lmt_ovrun:",
195 "rxe_frame_lmt_ovrun:",
196 le32_to_cpu(ofdm->rxe_frame_limit_overrun), 197 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
197 accum_ofdm->rxe_frame_limit_overrun, 198 accum_ofdm->rxe_frame_limit_overrun,
198 delta_ofdm->rxe_frame_limit_overrun, 199 delta_ofdm->rxe_frame_limit_overrun,
199 max_ofdm->rxe_frame_limit_overrun); 200 max_ofdm->rxe_frame_limit_overrun);
200 pos += scnprintf(buf + pos, bufsz - pos, 201 pos += scnprintf(buf + pos, bufsz - pos,
201 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", 202 fmt_table, "sent_ack_cnt:",
202 le32_to_cpu(ofdm->sent_ack_cnt), 203 le32_to_cpu(ofdm->sent_ack_cnt),
203 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, 204 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
204 max_ofdm->sent_ack_cnt); 205 max_ofdm->sent_ack_cnt);
205 pos += scnprintf(buf + pos, bufsz - pos, 206 pos += scnprintf(buf + pos, bufsz - pos,
206 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", 207 fmt_table, "sent_cts_cnt:",
207 le32_to_cpu(ofdm->sent_cts_cnt), 208 le32_to_cpu(ofdm->sent_cts_cnt),
208 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, 209 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
209 max_ofdm->sent_cts_cnt); 210 max_ofdm->sent_cts_cnt);
210 pos += scnprintf(buf + pos, bufsz - pos, 211 pos += scnprintf(buf + pos, bufsz - pos,
211 " %-30s %10u %10u %10u %10u\n", 212 fmt_table, "sent_ba_rsp_cnt:",
212 "sent_ba_rsp_cnt:",
213 le32_to_cpu(ofdm->sent_ba_rsp_cnt), 213 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
214 accum_ofdm->sent_ba_rsp_cnt, 214 accum_ofdm->sent_ba_rsp_cnt,
215 delta_ofdm->sent_ba_rsp_cnt, 215 delta_ofdm->sent_ba_rsp_cnt,
216 max_ofdm->sent_ba_rsp_cnt); 216 max_ofdm->sent_ba_rsp_cnt);
217 pos += scnprintf(buf + pos, bufsz - pos, 217 pos += scnprintf(buf + pos, bufsz - pos,
218 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:", 218 fmt_table, "dsp_self_kill:",
219 le32_to_cpu(ofdm->dsp_self_kill), 219 le32_to_cpu(ofdm->dsp_self_kill),
220 accum_ofdm->dsp_self_kill, 220 accum_ofdm->dsp_self_kill,
221 delta_ofdm->dsp_self_kill, 221 delta_ofdm->dsp_self_kill,
222 max_ofdm->dsp_self_kill); 222 max_ofdm->dsp_self_kill);
223 pos += scnprintf(buf + pos, bufsz - pos, 223 pos += scnprintf(buf + pos, bufsz - pos,
224 " %-30s %10u %10u %10u %10u\n", 224 fmt_table, "mh_format_err:",
225 "mh_format_err:",
226 le32_to_cpu(ofdm->mh_format_err), 225 le32_to_cpu(ofdm->mh_format_err),
227 accum_ofdm->mh_format_err, 226 accum_ofdm->mh_format_err,
228 delta_ofdm->mh_format_err, 227 delta_ofdm->mh_format_err,
229 max_ofdm->mh_format_err); 228 max_ofdm->mh_format_err);
230 pos += scnprintf(buf + pos, bufsz - pos, 229 pos += scnprintf(buf + pos, bufsz - pos,
231 " %-30s %10u %10u %10u %10u\n", 230 fmt_table, "re_acq_main_rssi_sum:",
232 "re_acq_main_rssi_sum:",
233 le32_to_cpu(ofdm->re_acq_main_rssi_sum), 231 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
234 accum_ofdm->re_acq_main_rssi_sum, 232 accum_ofdm->re_acq_main_rssi_sum,
235 delta_ofdm->re_acq_main_rssi_sum, 233 delta_ofdm->re_acq_main_rssi_sum,
236 max_ofdm->re_acq_main_rssi_sum); 234 max_ofdm->re_acq_main_rssi_sum);
237 235
238 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
239 "acumulative delta max\n",
240 "Statistics_Rx - CCK:");
241 pos += scnprintf(buf + pos, bufsz - pos, 236 pos += scnprintf(buf + pos, bufsz - pos,
242 " %-30s %10u %10u %10u %10u\n", 237 fmt_header, "Statistics_Rx - CCK:");
243 "ina_cnt:", 238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "ina_cnt:",
244 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, 240 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
245 delta_cck->ina_cnt, max_cck->ina_cnt); 241 delta_cck->ina_cnt, max_cck->ina_cnt);
246 pos += scnprintf(buf + pos, bufsz - pos, 242 pos += scnprintf(buf + pos, bufsz - pos,
247 " %-30s %10u %10u %10u %10u\n", 243 fmt_table, "fina_cnt:",
248 "fina_cnt:",
249 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, 244 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
250 delta_cck->fina_cnt, max_cck->fina_cnt); 245 delta_cck->fina_cnt, max_cck->fina_cnt);
251 pos += scnprintf(buf + pos, bufsz - pos, 246 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n", 247 fmt_table, "plcp_err:",
253 "plcp_err:",
254 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, 248 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
255 delta_cck->plcp_err, max_cck->plcp_err); 249 delta_cck->plcp_err, max_cck->plcp_err);
256 pos += scnprintf(buf + pos, bufsz - pos, 250 pos += scnprintf(buf + pos, bufsz - pos,
257 " %-30s %10u %10u %10u %10u\n", 251 fmt_table, "crc32_err:",
258 "crc32_err:",
259 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, 252 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
260 delta_cck->crc32_err, max_cck->crc32_err); 253 delta_cck->crc32_err, max_cck->crc32_err);
261 pos += scnprintf(buf + pos, bufsz - pos, 254 pos += scnprintf(buf + pos, bufsz - pos,
262 " %-30s %10u %10u %10u %10u\n", 255 fmt_table, "overrun_err:",
263 "overrun_err:",
264 le32_to_cpu(cck->overrun_err), 256 le32_to_cpu(cck->overrun_err),
265 accum_cck->overrun_err, delta_cck->overrun_err, 257 accum_cck->overrun_err, delta_cck->overrun_err,
266 max_cck->overrun_err); 258 max_cck->overrun_err);
267 pos += scnprintf(buf + pos, bufsz - pos, 259 pos += scnprintf(buf + pos, bufsz - pos,
268 " %-30s %10u %10u %10u %10u\n", 260 fmt_table, "early_overrun_err:",
269 "early_overrun_err:",
270 le32_to_cpu(cck->early_overrun_err), 261 le32_to_cpu(cck->early_overrun_err),
271 accum_cck->early_overrun_err, 262 accum_cck->early_overrun_err,
272 delta_cck->early_overrun_err, 263 delta_cck->early_overrun_err,
273 max_cck->early_overrun_err); 264 max_cck->early_overrun_err);
274 pos += scnprintf(buf + pos, bufsz - pos, 265 pos += scnprintf(buf + pos, bufsz - pos,
275 " %-30s %10u %10u %10u %10u\n", 266 fmt_table, "crc32_good:",
276 "crc32_good:",
277 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, 267 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
278 delta_cck->crc32_good, max_cck->crc32_good); 268 delta_cck->crc32_good, max_cck->crc32_good);
279 pos += scnprintf(buf + pos, bufsz - pos, 269 pos += scnprintf(buf + pos, bufsz - pos,
280 " %-30s %10u %10u %10u %10u\n", 270 fmt_table, "false_alarm_cnt:",
281 "false_alarm_cnt:",
282 le32_to_cpu(cck->false_alarm_cnt), 271 le32_to_cpu(cck->false_alarm_cnt),
283 accum_cck->false_alarm_cnt, 272 accum_cck->false_alarm_cnt,
284 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); 273 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
285 pos += scnprintf(buf + pos, bufsz - pos, 274 pos += scnprintf(buf + pos, bufsz - pos,
286 " %-30s %10u %10u %10u %10u\n", 275 fmt_table, "fina_sync_err_cnt:",
287 "fina_sync_err_cnt:",
288 le32_to_cpu(cck->fina_sync_err_cnt), 276 le32_to_cpu(cck->fina_sync_err_cnt),
289 accum_cck->fina_sync_err_cnt, 277 accum_cck->fina_sync_err_cnt,
290 delta_cck->fina_sync_err_cnt, 278 delta_cck->fina_sync_err_cnt,
291 max_cck->fina_sync_err_cnt); 279 max_cck->fina_sync_err_cnt);
292 pos += scnprintf(buf + pos, bufsz - pos, 280 pos += scnprintf(buf + pos, bufsz - pos,
293 " %-30s %10u %10u %10u %10u\n", 281 fmt_table, "sfd_timeout:",
294 "sfd_timeout:",
295 le32_to_cpu(cck->sfd_timeout), 282 le32_to_cpu(cck->sfd_timeout),
296 accum_cck->sfd_timeout, delta_cck->sfd_timeout, 283 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
297 max_cck->sfd_timeout); 284 max_cck->sfd_timeout);
298 pos += scnprintf(buf + pos, bufsz - pos, 285 pos += scnprintf(buf + pos, bufsz - pos,
299 " %-30s %10u %10u %10u %10u\n", "fina_timeout:", 286 fmt_table, "fina_timeout:",
300 le32_to_cpu(cck->fina_timeout), 287 le32_to_cpu(cck->fina_timeout),
301 accum_cck->fina_timeout, delta_cck->fina_timeout, 288 accum_cck->fina_timeout, delta_cck->fina_timeout,
302 max_cck->fina_timeout); 289 max_cck->fina_timeout);
303 pos += scnprintf(buf + pos, bufsz - pos, 290 pos += scnprintf(buf + pos, bufsz - pos,
304 " %-30s %10u %10u %10u %10u\n", 291 fmt_table, "unresponded_rts:",
305 "unresponded_rts:",
306 le32_to_cpu(cck->unresponded_rts), 292 le32_to_cpu(cck->unresponded_rts),
307 accum_cck->unresponded_rts, delta_cck->unresponded_rts, 293 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
308 max_cck->unresponded_rts); 294 max_cck->unresponded_rts);
309 pos += scnprintf(buf + pos, bufsz - pos, 295 pos += scnprintf(buf + pos, bufsz - pos,
310 " %-30s %10u %10u %10u %10u\n", 296 fmt_table, "rxe_frame_lmt_ovrun:",
311 "rxe_frame_lmt_ovrun:",
312 le32_to_cpu(cck->rxe_frame_limit_overrun), 297 le32_to_cpu(cck->rxe_frame_limit_overrun),
313 accum_cck->rxe_frame_limit_overrun, 298 accum_cck->rxe_frame_limit_overrun,
314 delta_cck->rxe_frame_limit_overrun, 299 delta_cck->rxe_frame_limit_overrun,
315 max_cck->rxe_frame_limit_overrun); 300 max_cck->rxe_frame_limit_overrun);
316 pos += scnprintf(buf + pos, bufsz - pos, 301 pos += scnprintf(buf + pos, bufsz - pos,
317 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:", 302 fmt_table, "sent_ack_cnt:",
318 le32_to_cpu(cck->sent_ack_cnt), 303 le32_to_cpu(cck->sent_ack_cnt),
319 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, 304 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
320 max_cck->sent_ack_cnt); 305 max_cck->sent_ack_cnt);
321 pos += scnprintf(buf + pos, bufsz - pos, 306 pos += scnprintf(buf + pos, bufsz - pos,
322 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:", 307 fmt_table, "sent_cts_cnt:",
323 le32_to_cpu(cck->sent_cts_cnt), 308 le32_to_cpu(cck->sent_cts_cnt),
324 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, 309 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
325 max_cck->sent_cts_cnt); 310 max_cck->sent_cts_cnt);
326 pos += scnprintf(buf + pos, bufsz - pos, 311 pos += scnprintf(buf + pos, bufsz - pos,
327 " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:", 312 fmt_table, "sent_ba_rsp_cnt:",
328 le32_to_cpu(cck->sent_ba_rsp_cnt), 313 le32_to_cpu(cck->sent_ba_rsp_cnt),
329 accum_cck->sent_ba_rsp_cnt, 314 accum_cck->sent_ba_rsp_cnt,
330 delta_cck->sent_ba_rsp_cnt, 315 delta_cck->sent_ba_rsp_cnt,
331 max_cck->sent_ba_rsp_cnt); 316 max_cck->sent_ba_rsp_cnt);
332 pos += scnprintf(buf + pos, bufsz - pos, 317 pos += scnprintf(buf + pos, bufsz - pos,
333 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:", 318 fmt_table, "dsp_self_kill:",
334 le32_to_cpu(cck->dsp_self_kill), 319 le32_to_cpu(cck->dsp_self_kill),
335 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, 320 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
336 max_cck->dsp_self_kill); 321 max_cck->dsp_self_kill);
337 pos += scnprintf(buf + pos, bufsz - pos, 322 pos += scnprintf(buf + pos, bufsz - pos,
338 " %-30s %10u %10u %10u %10u\n", "mh_format_err:", 323 fmt_table, "mh_format_err:",
339 le32_to_cpu(cck->mh_format_err), 324 le32_to_cpu(cck->mh_format_err),
340 accum_cck->mh_format_err, delta_cck->mh_format_err, 325 accum_cck->mh_format_err, delta_cck->mh_format_err,
341 max_cck->mh_format_err); 326 max_cck->mh_format_err);
342 pos += scnprintf(buf + pos, bufsz - pos, 327 pos += scnprintf(buf + pos, bufsz - pos,
343 " %-30s %10u %10u %10u %10u\n", 328 fmt_table, "re_acq_main_rssi_sum:",
344 "re_acq_main_rssi_sum:",
345 le32_to_cpu(cck->re_acq_main_rssi_sum), 329 le32_to_cpu(cck->re_acq_main_rssi_sum),
346 accum_cck->re_acq_main_rssi_sum, 330 accum_cck->re_acq_main_rssi_sum,
347 delta_cck->re_acq_main_rssi_sum, 331 delta_cck->re_acq_main_rssi_sum,
348 max_cck->re_acq_main_rssi_sum); 332 max_cck->re_acq_main_rssi_sum);
349 333
350 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
351 "acumulative delta max\n",
352 "Statistics_Rx - GENERAL:");
353 pos += scnprintf(buf + pos, bufsz - pos, 334 pos += scnprintf(buf + pos, bufsz - pos,
354 " %-30s %10u %10u %10u %10u\n", "bogus_cts:", 335 fmt_header, "Statistics_Rx - GENERAL:");
336 pos += scnprintf(buf + pos, bufsz - pos,
337 fmt_table, "bogus_cts:",
355 le32_to_cpu(general->bogus_cts), 338 le32_to_cpu(general->bogus_cts),
356 accum_general->bogus_cts, delta_general->bogus_cts, 339 accum_general->bogus_cts, delta_general->bogus_cts,
357 max_general->bogus_cts); 340 max_general->bogus_cts);
358 pos += scnprintf(buf + pos, bufsz - pos, 341 pos += scnprintf(buf + pos, bufsz - pos,
359 " %-30s %10u %10u %10u %10u\n", "bogus_ack:", 342 fmt_table, "bogus_ack:",
360 le32_to_cpu(general->bogus_ack), 343 le32_to_cpu(general->bogus_ack),
361 accum_general->bogus_ack, delta_general->bogus_ack, 344 accum_general->bogus_ack, delta_general->bogus_ack,
362 max_general->bogus_ack); 345 max_general->bogus_ack);
363 pos += scnprintf(buf + pos, bufsz - pos, 346 pos += scnprintf(buf + pos, bufsz - pos,
364 " %-30s %10u %10u %10u %10u\n", 347 fmt_table, "non_bssid_frames:",
365 "non_bssid_frames:",
366 le32_to_cpu(general->non_bssid_frames), 348 le32_to_cpu(general->non_bssid_frames),
367 accum_general->non_bssid_frames, 349 accum_general->non_bssid_frames,
368 delta_general->non_bssid_frames, 350 delta_general->non_bssid_frames,
369 max_general->non_bssid_frames); 351 max_general->non_bssid_frames);
370 pos += scnprintf(buf + pos, bufsz - pos, 352 pos += scnprintf(buf + pos, bufsz - pos,
371 " %-30s %10u %10u %10u %10u\n", 353 fmt_table, "filtered_frames:",
372 "filtered_frames:",
373 le32_to_cpu(general->filtered_frames), 354 le32_to_cpu(general->filtered_frames),
374 accum_general->filtered_frames, 355 accum_general->filtered_frames,
375 delta_general->filtered_frames, 356 delta_general->filtered_frames,
376 max_general->filtered_frames); 357 max_general->filtered_frames);
377 pos += scnprintf(buf + pos, bufsz - pos, 358 pos += scnprintf(buf + pos, bufsz - pos,
378 " %-30s %10u %10u %10u %10u\n", 359 fmt_table, "non_channel_beacons:",
379 "non_channel_beacons:",
380 le32_to_cpu(general->non_channel_beacons), 360 le32_to_cpu(general->non_channel_beacons),
381 accum_general->non_channel_beacons, 361 accum_general->non_channel_beacons,
382 delta_general->non_channel_beacons, 362 delta_general->non_channel_beacons,
383 max_general->non_channel_beacons); 363 max_general->non_channel_beacons);
384 pos += scnprintf(buf + pos, bufsz - pos, 364 pos += scnprintf(buf + pos, bufsz - pos,
385 " %-30s %10u %10u %10u %10u\n", 365 fmt_table, "channel_beacons:",
386 "channel_beacons:",
387 le32_to_cpu(general->channel_beacons), 366 le32_to_cpu(general->channel_beacons),
388 accum_general->channel_beacons, 367 accum_general->channel_beacons,
389 delta_general->channel_beacons, 368 delta_general->channel_beacons,
390 max_general->channel_beacons); 369 max_general->channel_beacons);
391 pos += scnprintf(buf + pos, bufsz - pos, 370 pos += scnprintf(buf + pos, bufsz - pos,
392 " %-30s %10u %10u %10u %10u\n", 371 fmt_table, "num_missed_bcon:",
393 "num_missed_bcon:",
394 le32_to_cpu(general->num_missed_bcon), 372 le32_to_cpu(general->num_missed_bcon),
395 accum_general->num_missed_bcon, 373 accum_general->num_missed_bcon,
396 delta_general->num_missed_bcon, 374 delta_general->num_missed_bcon,
397 max_general->num_missed_bcon); 375 max_general->num_missed_bcon);
398 pos += scnprintf(buf + pos, bufsz - pos, 376 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n", 377 fmt_table, "adc_rx_saturation_time:",
400 "adc_rx_saturation_time:",
401 le32_to_cpu(general->adc_rx_saturation_time), 378 le32_to_cpu(general->adc_rx_saturation_time),
402 accum_general->adc_rx_saturation_time, 379 accum_general->adc_rx_saturation_time,
403 delta_general->adc_rx_saturation_time, 380 delta_general->adc_rx_saturation_time,
404 max_general->adc_rx_saturation_time); 381 max_general->adc_rx_saturation_time);
405 pos += scnprintf(buf + pos, bufsz - pos, 382 pos += scnprintf(buf + pos, bufsz - pos,
406 " %-30s %10u %10u %10u %10u\n", 383 fmt_table, "ina_detect_search_tm:",
407 "ina_detect_search_tm:",
408 le32_to_cpu(general->ina_detection_search_time), 384 le32_to_cpu(general->ina_detection_search_time),
409 accum_general->ina_detection_search_time, 385 accum_general->ina_detection_search_time,
410 delta_general->ina_detection_search_time, 386 delta_general->ina_detection_search_time,
411 max_general->ina_detection_search_time); 387 max_general->ina_detection_search_time);
412 pos += scnprintf(buf + pos, bufsz - pos, 388 pos += scnprintf(buf + pos, bufsz - pos,
413 " %-30s %10u %10u %10u %10u\n", 389 fmt_table, "beacon_silence_rssi_a:",
414 "beacon_silence_rssi_a:",
415 le32_to_cpu(general->beacon_silence_rssi_a), 390 le32_to_cpu(general->beacon_silence_rssi_a),
416 accum_general->beacon_silence_rssi_a, 391 accum_general->beacon_silence_rssi_a,
417 delta_general->beacon_silence_rssi_a, 392 delta_general->beacon_silence_rssi_a,
418 max_general->beacon_silence_rssi_a); 393 max_general->beacon_silence_rssi_a);
419 pos += scnprintf(buf + pos, bufsz - pos, 394 pos += scnprintf(buf + pos, bufsz - pos,
420 " %-30s %10u %10u %10u %10u\n", 395 fmt_table, "beacon_silence_rssi_b:",
421 "beacon_silence_rssi_b:",
422 le32_to_cpu(general->beacon_silence_rssi_b), 396 le32_to_cpu(general->beacon_silence_rssi_b),
423 accum_general->beacon_silence_rssi_b, 397 accum_general->beacon_silence_rssi_b,
424 delta_general->beacon_silence_rssi_b, 398 delta_general->beacon_silence_rssi_b,
425 max_general->beacon_silence_rssi_b); 399 max_general->beacon_silence_rssi_b);
426 pos += scnprintf(buf + pos, bufsz - pos, 400 pos += scnprintf(buf + pos, bufsz - pos,
427 " %-30s %10u %10u %10u %10u\n", 401 fmt_table, "beacon_silence_rssi_c:",
428 "beacon_silence_rssi_c:",
429 le32_to_cpu(general->beacon_silence_rssi_c), 402 le32_to_cpu(general->beacon_silence_rssi_c),
430 accum_general->beacon_silence_rssi_c, 403 accum_general->beacon_silence_rssi_c,
431 delta_general->beacon_silence_rssi_c, 404 delta_general->beacon_silence_rssi_c,
432 max_general->beacon_silence_rssi_c); 405 max_general->beacon_silence_rssi_c);
433 pos += scnprintf(buf + pos, bufsz - pos, 406 pos += scnprintf(buf + pos, bufsz - pos,
434 " %-30s %10u %10u %10u %10u\n", 407 fmt_table, "interference_data_flag:",
435 "interference_data_flag:",
436 le32_to_cpu(general->interference_data_flag), 408 le32_to_cpu(general->interference_data_flag),
437 accum_general->interference_data_flag, 409 accum_general->interference_data_flag,
438 delta_general->interference_data_flag, 410 delta_general->interference_data_flag,
439 max_general->interference_data_flag); 411 max_general->interference_data_flag);
440 pos += scnprintf(buf + pos, bufsz - pos, 412 pos += scnprintf(buf + pos, bufsz - pos,
441 " %-30s %10u %10u %10u %10u\n", 413 fmt_table, "channel_load:",
442 "channel_load:",
443 le32_to_cpu(general->channel_load), 414 le32_to_cpu(general->channel_load),
444 accum_general->channel_load, 415 accum_general->channel_load,
445 delta_general->channel_load, 416 delta_general->channel_load,
446 max_general->channel_load); 417 max_general->channel_load);
447 pos += scnprintf(buf + pos, bufsz - pos, 418 pos += scnprintf(buf + pos, bufsz - pos,
448 " %-30s %10u %10u %10u %10u\n", 419 fmt_table, "dsp_false_alarms:",
449 "dsp_false_alarms:",
450 le32_to_cpu(general->dsp_false_alarms), 420 le32_to_cpu(general->dsp_false_alarms),
451 accum_general->dsp_false_alarms, 421 accum_general->dsp_false_alarms,
452 delta_general->dsp_false_alarms, 422 delta_general->dsp_false_alarms,
453 max_general->dsp_false_alarms); 423 max_general->dsp_false_alarms);
454 pos += scnprintf(buf + pos, bufsz - pos, 424 pos += scnprintf(buf + pos, bufsz - pos,
455 " %-30s %10u %10u %10u %10u\n", 425 fmt_table, "beacon_rssi_a:",
456 "beacon_rssi_a:",
457 le32_to_cpu(general->beacon_rssi_a), 426 le32_to_cpu(general->beacon_rssi_a),
458 accum_general->beacon_rssi_a, 427 accum_general->beacon_rssi_a,
459 delta_general->beacon_rssi_a, 428 delta_general->beacon_rssi_a,
460 max_general->beacon_rssi_a); 429 max_general->beacon_rssi_a);
461 pos += scnprintf(buf + pos, bufsz - pos, 430 pos += scnprintf(buf + pos, bufsz - pos,
462 " %-30s %10u %10u %10u %10u\n", 431 fmt_table, "beacon_rssi_b:",
463 "beacon_rssi_b:",
464 le32_to_cpu(general->beacon_rssi_b), 432 le32_to_cpu(general->beacon_rssi_b),
465 accum_general->beacon_rssi_b, 433 accum_general->beacon_rssi_b,
466 delta_general->beacon_rssi_b, 434 delta_general->beacon_rssi_b,
467 max_general->beacon_rssi_b); 435 max_general->beacon_rssi_b);
468 pos += scnprintf(buf + pos, bufsz - pos, 436 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n", 437 fmt_table, "beacon_rssi_c:",
470 "beacon_rssi_c:",
471 le32_to_cpu(general->beacon_rssi_c), 438 le32_to_cpu(general->beacon_rssi_c),
472 accum_general->beacon_rssi_c, 439 accum_general->beacon_rssi_c,
473 delta_general->beacon_rssi_c, 440 delta_general->beacon_rssi_c,
474 max_general->beacon_rssi_c); 441 max_general->beacon_rssi_c);
475 pos += scnprintf(buf + pos, bufsz - pos, 442 pos += scnprintf(buf + pos, bufsz - pos,
476 " %-30s %10u %10u %10u %10u\n", 443 fmt_table, "beacon_energy_a:",
477 "beacon_energy_a:",
478 le32_to_cpu(general->beacon_energy_a), 444 le32_to_cpu(general->beacon_energy_a),
479 accum_general->beacon_energy_a, 445 accum_general->beacon_energy_a,
480 delta_general->beacon_energy_a, 446 delta_general->beacon_energy_a,
481 max_general->beacon_energy_a); 447 max_general->beacon_energy_a);
482 pos += scnprintf(buf + pos, bufsz - pos, 448 pos += scnprintf(buf + pos, bufsz - pos,
483 " %-30s %10u %10u %10u %10u\n", 449 fmt_table, "beacon_energy_b:",
484 "beacon_energy_b:",
485 le32_to_cpu(general->beacon_energy_b), 450 le32_to_cpu(general->beacon_energy_b),
486 accum_general->beacon_energy_b, 451 accum_general->beacon_energy_b,
487 delta_general->beacon_energy_b, 452 delta_general->beacon_energy_b,
488 max_general->beacon_energy_b); 453 max_general->beacon_energy_b);
489 pos += scnprintf(buf + pos, bufsz - pos, 454 pos += scnprintf(buf + pos, bufsz - pos,
490 " %-30s %10u %10u %10u %10u\n", 455 fmt_table, "beacon_energy_c:",
491 "beacon_energy_c:",
492 le32_to_cpu(general->beacon_energy_c), 456 le32_to_cpu(general->beacon_energy_c),
493 accum_general->beacon_energy_c, 457 accum_general->beacon_energy_c,
494 delta_general->beacon_energy_c, 458 delta_general->beacon_energy_c,
495 max_general->beacon_energy_c); 459 max_general->beacon_energy_c);
496 460
497 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
498 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
499 "acumulative delta max\n",
500 "Statistics_Rx - OFDM_HT:");
501 pos += scnprintf(buf + pos, bufsz - pos, 461 pos += scnprintf(buf + pos, bufsz - pos,
502 " %-30s %10u %10u %10u %10u\n", 462 fmt_header, "Statistics_Rx - OFDM_HT:");
503 "plcp_err:", 463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "plcp_err:",
504 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, 465 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
505 delta_ht->plcp_err, max_ht->plcp_err); 466 delta_ht->plcp_err, max_ht->plcp_err);
506 pos += scnprintf(buf + pos, bufsz - pos, 467 pos += scnprintf(buf + pos, bufsz - pos,
507 " %-30s %10u %10u %10u %10u\n", 468 fmt_table, "overrun_err:",
508 "overrun_err:",
509 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, 469 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
510 delta_ht->overrun_err, max_ht->overrun_err); 470 delta_ht->overrun_err, max_ht->overrun_err);
511 pos += scnprintf(buf + pos, bufsz - pos, 471 pos += scnprintf(buf + pos, bufsz - pos,
512 " %-30s %10u %10u %10u %10u\n", 472 fmt_table, "early_overrun_err:",
513 "early_overrun_err:",
514 le32_to_cpu(ht->early_overrun_err), 473 le32_to_cpu(ht->early_overrun_err),
515 accum_ht->early_overrun_err, 474 accum_ht->early_overrun_err,
516 delta_ht->early_overrun_err, 475 delta_ht->early_overrun_err,
517 max_ht->early_overrun_err); 476 max_ht->early_overrun_err);
518 pos += scnprintf(buf + pos, bufsz - pos, 477 pos += scnprintf(buf + pos, bufsz - pos,
519 " %-30s %10u %10u %10u %10u\n", 478 fmt_table, "crc32_good:",
520 "crc32_good:",
521 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, 479 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
522 delta_ht->crc32_good, max_ht->crc32_good); 480 delta_ht->crc32_good, max_ht->crc32_good);
523 pos += scnprintf(buf + pos, bufsz - pos, 481 pos += scnprintf(buf + pos, bufsz - pos,
524 " %-30s %10u %10u %10u %10u\n", 482 fmt_table, "crc32_err:",
525 "crc32_err:",
526 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, 483 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
527 delta_ht->crc32_err, max_ht->crc32_err); 484 delta_ht->crc32_err, max_ht->crc32_err);
528 pos += scnprintf(buf + pos, bufsz - pos, 485 pos += scnprintf(buf + pos, bufsz - pos,
529 " %-30s %10u %10u %10u %10u\n", 486 fmt_table, "mh_format_err:",
530 "mh_format_err:",
531 le32_to_cpu(ht->mh_format_err), 487 le32_to_cpu(ht->mh_format_err),
532 accum_ht->mh_format_err, 488 accum_ht->mh_format_err,
533 delta_ht->mh_format_err, max_ht->mh_format_err); 489 delta_ht->mh_format_err, max_ht->mh_format_err);
534 pos += scnprintf(buf + pos, bufsz - pos, 490 pos += scnprintf(buf + pos, bufsz - pos,
535 " %-30s %10u %10u %10u %10u\n", 491 fmt_table, "agg_crc32_good:",
536 "agg_crc32_good:",
537 le32_to_cpu(ht->agg_crc32_good), 492 le32_to_cpu(ht->agg_crc32_good),
538 accum_ht->agg_crc32_good, 493 accum_ht->agg_crc32_good,
539 delta_ht->agg_crc32_good, max_ht->agg_crc32_good); 494 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
540 pos += scnprintf(buf + pos, bufsz - pos, 495 pos += scnprintf(buf + pos, bufsz - pos,
541 " %-30s %10u %10u %10u %10u\n", 496 fmt_table, "agg_mpdu_cnt:",
542 "agg_mpdu_cnt:",
543 le32_to_cpu(ht->agg_mpdu_cnt), 497 le32_to_cpu(ht->agg_mpdu_cnt),
544 accum_ht->agg_mpdu_cnt, 498 accum_ht->agg_mpdu_cnt,
545 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); 499 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
546 pos += scnprintf(buf + pos, bufsz - pos, 500 pos += scnprintf(buf + pos, bufsz - pos,
547 " %-30s %10u %10u %10u %10u\n", 501 fmt_table, "agg_cnt:",
548 "agg_cnt:",
549 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, 502 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
550 delta_ht->agg_cnt, max_ht->agg_cnt); 503 delta_ht->agg_cnt, max_ht->agg_cnt);
551 pos += scnprintf(buf + pos, bufsz - pos, 504 pos += scnprintf(buf + pos, bufsz - pos,
552 " %-30s %10u %10u %10u %10u\n", 505 fmt_table, "unsupport_mcs:",
553 "unsupport_mcs:",
554 le32_to_cpu(ht->unsupport_mcs), 506 le32_to_cpu(ht->unsupport_mcs),
555 accum_ht->unsupport_mcs, 507 accum_ht->unsupport_mcs,
556 delta_ht->unsupport_mcs, max_ht->unsupport_mcs); 508 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
@@ -584,7 +536,8 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
584 * the last statistics notification from uCode 536 * the last statistics notification from uCode
585 * might not reflect the current uCode activity 537 * might not reflect the current uCode activity
586 */ 538 */
587 if (priv->cfg->bt_statistics) { 539 if (priv->cfg->bt_params &&
540 priv->cfg->bt_params->bt_statistics) {
588 tx = &priv->_agn.statistics_bt.tx; 541 tx = &priv->_agn.statistics_bt.tx;
589 accum_tx = &priv->_agn.accum_statistics_bt.tx; 542 accum_tx = &priv->_agn.accum_statistics_bt.tx;
590 delta_tx = &priv->_agn.delta_statistics_bt.tx; 543 delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -597,166 +550,141 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
597 } 550 }
598 551
599 pos += iwl_statistics_flag(priv, buf, bufsz); 552 pos += iwl_statistics_flag(priv, buf, bufsz);
600 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
601 "acumulative delta max\n",
602 "Statistics_Tx:");
603 pos += scnprintf(buf + pos, bufsz - pos, 553 pos += scnprintf(buf + pos, bufsz - pos,
604 " %-30s %10u %10u %10u %10u\n", 554 fmt_header, "Statistics_Tx:");
605 "preamble:", 555 pos += scnprintf(buf + pos, bufsz - pos,
556 fmt_table, "preamble:",
606 le32_to_cpu(tx->preamble_cnt), 557 le32_to_cpu(tx->preamble_cnt),
607 accum_tx->preamble_cnt, 558 accum_tx->preamble_cnt,
608 delta_tx->preamble_cnt, max_tx->preamble_cnt); 559 delta_tx->preamble_cnt, max_tx->preamble_cnt);
609 pos += scnprintf(buf + pos, bufsz - pos, 560 pos += scnprintf(buf + pos, bufsz - pos,
610 " %-30s %10u %10u %10u %10u\n", 561 fmt_table, "rx_detected_cnt:",
611 "rx_detected_cnt:",
612 le32_to_cpu(tx->rx_detected_cnt), 562 le32_to_cpu(tx->rx_detected_cnt),
613 accum_tx->rx_detected_cnt, 563 accum_tx->rx_detected_cnt,
614 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); 564 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
615 pos += scnprintf(buf + pos, bufsz - pos, 565 pos += scnprintf(buf + pos, bufsz - pos,
616 " %-30s %10u %10u %10u %10u\n", 566 fmt_table, "bt_prio_defer_cnt:",
617 "bt_prio_defer_cnt:",
618 le32_to_cpu(tx->bt_prio_defer_cnt), 567 le32_to_cpu(tx->bt_prio_defer_cnt),
619 accum_tx->bt_prio_defer_cnt, 568 accum_tx->bt_prio_defer_cnt,
620 delta_tx->bt_prio_defer_cnt, 569 delta_tx->bt_prio_defer_cnt,
621 max_tx->bt_prio_defer_cnt); 570 max_tx->bt_prio_defer_cnt);
622 pos += scnprintf(buf + pos, bufsz - pos, 571 pos += scnprintf(buf + pos, bufsz - pos,
623 " %-30s %10u %10u %10u %10u\n", 572 fmt_table, "bt_prio_kill_cnt:",
624 "bt_prio_kill_cnt:",
625 le32_to_cpu(tx->bt_prio_kill_cnt), 573 le32_to_cpu(tx->bt_prio_kill_cnt),
626 accum_tx->bt_prio_kill_cnt, 574 accum_tx->bt_prio_kill_cnt,
627 delta_tx->bt_prio_kill_cnt, 575 delta_tx->bt_prio_kill_cnt,
628 max_tx->bt_prio_kill_cnt); 576 max_tx->bt_prio_kill_cnt);
629 pos += scnprintf(buf + pos, bufsz - pos, 577 pos += scnprintf(buf + pos, bufsz - pos,
630 " %-30s %10u %10u %10u %10u\n", 578 fmt_table, "few_bytes_cnt:",
631 "few_bytes_cnt:",
632 le32_to_cpu(tx->few_bytes_cnt), 579 le32_to_cpu(tx->few_bytes_cnt),
633 accum_tx->few_bytes_cnt, 580 accum_tx->few_bytes_cnt,
634 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); 581 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
635 pos += scnprintf(buf + pos, bufsz - pos, 582 pos += scnprintf(buf + pos, bufsz - pos,
636 " %-30s %10u %10u %10u %10u\n", 583 fmt_table, "cts_timeout:",
637 "cts_timeout:",
638 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, 584 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
639 delta_tx->cts_timeout, max_tx->cts_timeout); 585 delta_tx->cts_timeout, max_tx->cts_timeout);
640 pos += scnprintf(buf + pos, bufsz - pos, 586 pos += scnprintf(buf + pos, bufsz - pos,
641 " %-30s %10u %10u %10u %10u\n", 587 fmt_table, "ack_timeout:",
642 "ack_timeout:",
643 le32_to_cpu(tx->ack_timeout), 588 le32_to_cpu(tx->ack_timeout),
644 accum_tx->ack_timeout, 589 accum_tx->ack_timeout,
645 delta_tx->ack_timeout, max_tx->ack_timeout); 590 delta_tx->ack_timeout, max_tx->ack_timeout);
646 pos += scnprintf(buf + pos, bufsz - pos, 591 pos += scnprintf(buf + pos, bufsz - pos,
647 " %-30s %10u %10u %10u %10u\n", 592 fmt_table, "expected_ack_cnt:",
648 "expected_ack_cnt:",
649 le32_to_cpu(tx->expected_ack_cnt), 593 le32_to_cpu(tx->expected_ack_cnt),
650 accum_tx->expected_ack_cnt, 594 accum_tx->expected_ack_cnt,
651 delta_tx->expected_ack_cnt, 595 delta_tx->expected_ack_cnt,
652 max_tx->expected_ack_cnt); 596 max_tx->expected_ack_cnt);
653 pos += scnprintf(buf + pos, bufsz - pos, 597 pos += scnprintf(buf + pos, bufsz - pos,
654 " %-30s %10u %10u %10u %10u\n", 598 fmt_table, "actual_ack_cnt:",
655 "actual_ack_cnt:",
656 le32_to_cpu(tx->actual_ack_cnt), 599 le32_to_cpu(tx->actual_ack_cnt),
657 accum_tx->actual_ack_cnt, 600 accum_tx->actual_ack_cnt,
658 delta_tx->actual_ack_cnt, 601 delta_tx->actual_ack_cnt,
659 max_tx->actual_ack_cnt); 602 max_tx->actual_ack_cnt);
660 pos += scnprintf(buf + pos, bufsz - pos, 603 pos += scnprintf(buf + pos, bufsz - pos,
661 " %-30s %10u %10u %10u %10u\n", 604 fmt_table, "dump_msdu_cnt:",
662 "dump_msdu_cnt:",
663 le32_to_cpu(tx->dump_msdu_cnt), 605 le32_to_cpu(tx->dump_msdu_cnt),
664 accum_tx->dump_msdu_cnt, 606 accum_tx->dump_msdu_cnt,
665 delta_tx->dump_msdu_cnt, 607 delta_tx->dump_msdu_cnt,
666 max_tx->dump_msdu_cnt); 608 max_tx->dump_msdu_cnt);
667 pos += scnprintf(buf + pos, bufsz - pos, 609 pos += scnprintf(buf + pos, bufsz - pos,
668 " %-30s %10u %10u %10u %10u\n", 610 fmt_table, "abort_nxt_frame_mismatch:",
669 "abort_nxt_frame_mismatch:",
670 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), 611 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
671 accum_tx->burst_abort_next_frame_mismatch_cnt, 612 accum_tx->burst_abort_next_frame_mismatch_cnt,
672 delta_tx->burst_abort_next_frame_mismatch_cnt, 613 delta_tx->burst_abort_next_frame_mismatch_cnt,
673 max_tx->burst_abort_next_frame_mismatch_cnt); 614 max_tx->burst_abort_next_frame_mismatch_cnt);
674 pos += scnprintf(buf + pos, bufsz - pos, 615 pos += scnprintf(buf + pos, bufsz - pos,
675 " %-30s %10u %10u %10u %10u\n", 616 fmt_table, "abort_missing_nxt_frame:",
676 "abort_missing_nxt_frame:",
677 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), 617 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
678 accum_tx->burst_abort_missing_next_frame_cnt, 618 accum_tx->burst_abort_missing_next_frame_cnt,
679 delta_tx->burst_abort_missing_next_frame_cnt, 619 delta_tx->burst_abort_missing_next_frame_cnt,
680 max_tx->burst_abort_missing_next_frame_cnt); 620 max_tx->burst_abort_missing_next_frame_cnt);
681 pos += scnprintf(buf + pos, bufsz - pos, 621 pos += scnprintf(buf + pos, bufsz - pos,
682 " %-30s %10u %10u %10u %10u\n", 622 fmt_table, "cts_timeout_collision:",
683 "cts_timeout_collision:",
684 le32_to_cpu(tx->cts_timeout_collision), 623 le32_to_cpu(tx->cts_timeout_collision),
685 accum_tx->cts_timeout_collision, 624 accum_tx->cts_timeout_collision,
686 delta_tx->cts_timeout_collision, 625 delta_tx->cts_timeout_collision,
687 max_tx->cts_timeout_collision); 626 max_tx->cts_timeout_collision);
688 pos += scnprintf(buf + pos, bufsz - pos, 627 pos += scnprintf(buf + pos, bufsz - pos,
689 " %-30s %10u %10u %10u %10u\n", 628 fmt_table, "ack_ba_timeout_collision:",
690 "ack_ba_timeout_collision:",
691 le32_to_cpu(tx->ack_or_ba_timeout_collision), 629 le32_to_cpu(tx->ack_or_ba_timeout_collision),
692 accum_tx->ack_or_ba_timeout_collision, 630 accum_tx->ack_or_ba_timeout_collision,
693 delta_tx->ack_or_ba_timeout_collision, 631 delta_tx->ack_or_ba_timeout_collision,
694 max_tx->ack_or_ba_timeout_collision); 632 max_tx->ack_or_ba_timeout_collision);
695 pos += scnprintf(buf + pos, bufsz - pos, 633 pos += scnprintf(buf + pos, bufsz - pos,
696 " %-30s %10u %10u %10u %10u\n", 634 fmt_table, "agg ba_timeout:",
697 "agg ba_timeout:",
698 le32_to_cpu(tx->agg.ba_timeout), 635 le32_to_cpu(tx->agg.ba_timeout),
699 accum_tx->agg.ba_timeout, 636 accum_tx->agg.ba_timeout,
700 delta_tx->agg.ba_timeout, 637 delta_tx->agg.ba_timeout,
701 max_tx->agg.ba_timeout); 638 max_tx->agg.ba_timeout);
702 pos += scnprintf(buf + pos, bufsz - pos, 639 pos += scnprintf(buf + pos, bufsz - pos,
703 " %-30s %10u %10u %10u %10u\n", 640 fmt_table, "agg ba_resched_frames:",
704 "agg ba_resched_frames:",
705 le32_to_cpu(tx->agg.ba_reschedule_frames), 641 le32_to_cpu(tx->agg.ba_reschedule_frames),
706 accum_tx->agg.ba_reschedule_frames, 642 accum_tx->agg.ba_reschedule_frames,
707 delta_tx->agg.ba_reschedule_frames, 643 delta_tx->agg.ba_reschedule_frames,
708 max_tx->agg.ba_reschedule_frames); 644 max_tx->agg.ba_reschedule_frames);
709 pos += scnprintf(buf + pos, bufsz - pos, 645 pos += scnprintf(buf + pos, bufsz - pos,
710 " %-30s %10u %10u %10u %10u\n", 646 fmt_table, "agg scd_query_agg_frame:",
711 "agg scd_query_agg_frame:",
712 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), 647 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
713 accum_tx->agg.scd_query_agg_frame_cnt, 648 accum_tx->agg.scd_query_agg_frame_cnt,
714 delta_tx->agg.scd_query_agg_frame_cnt, 649 delta_tx->agg.scd_query_agg_frame_cnt,
715 max_tx->agg.scd_query_agg_frame_cnt); 650 max_tx->agg.scd_query_agg_frame_cnt);
716 pos += scnprintf(buf + pos, bufsz - pos, 651 pos += scnprintf(buf + pos, bufsz - pos,
717 " %-30s %10u %10u %10u %10u\n", 652 fmt_table, "agg scd_query_no_agg:",
718 "agg scd_query_no_agg:",
719 le32_to_cpu(tx->agg.scd_query_no_agg), 653 le32_to_cpu(tx->agg.scd_query_no_agg),
720 accum_tx->agg.scd_query_no_agg, 654 accum_tx->agg.scd_query_no_agg,
721 delta_tx->agg.scd_query_no_agg, 655 delta_tx->agg.scd_query_no_agg,
722 max_tx->agg.scd_query_no_agg); 656 max_tx->agg.scd_query_no_agg);
723 pos += scnprintf(buf + pos, bufsz - pos, 657 pos += scnprintf(buf + pos, bufsz - pos,
724 " %-30s %10u %10u %10u %10u\n", 658 fmt_table, "agg scd_query_agg:",
725 "agg scd_query_agg:",
726 le32_to_cpu(tx->agg.scd_query_agg), 659 le32_to_cpu(tx->agg.scd_query_agg),
727 accum_tx->agg.scd_query_agg, 660 accum_tx->agg.scd_query_agg,
728 delta_tx->agg.scd_query_agg, 661 delta_tx->agg.scd_query_agg,
729 max_tx->agg.scd_query_agg); 662 max_tx->agg.scd_query_agg);
730 pos += scnprintf(buf + pos, bufsz - pos, 663 pos += scnprintf(buf + pos, bufsz - pos,
731 " %-30s %10u %10u %10u %10u\n", 664 fmt_table, "agg scd_query_mismatch:",
732 "agg scd_query_mismatch:",
733 le32_to_cpu(tx->agg.scd_query_mismatch), 665 le32_to_cpu(tx->agg.scd_query_mismatch),
734 accum_tx->agg.scd_query_mismatch, 666 accum_tx->agg.scd_query_mismatch,
735 delta_tx->agg.scd_query_mismatch, 667 delta_tx->agg.scd_query_mismatch,
736 max_tx->agg.scd_query_mismatch); 668 max_tx->agg.scd_query_mismatch);
737 pos += scnprintf(buf + pos, bufsz - pos, 669 pos += scnprintf(buf + pos, bufsz - pos,
738 " %-30s %10u %10u %10u %10u\n", 670 fmt_table, "agg frame_not_ready:",
739 "agg frame_not_ready:",
740 le32_to_cpu(tx->agg.frame_not_ready), 671 le32_to_cpu(tx->agg.frame_not_ready),
741 accum_tx->agg.frame_not_ready, 672 accum_tx->agg.frame_not_ready,
742 delta_tx->agg.frame_not_ready, 673 delta_tx->agg.frame_not_ready,
743 max_tx->agg.frame_not_ready); 674 max_tx->agg.frame_not_ready);
744 pos += scnprintf(buf + pos, bufsz - pos, 675 pos += scnprintf(buf + pos, bufsz - pos,
745 " %-30s %10u %10u %10u %10u\n", 676 fmt_table, "agg underrun:",
746 "agg underrun:",
747 le32_to_cpu(tx->agg.underrun), 677 le32_to_cpu(tx->agg.underrun),
748 accum_tx->agg.underrun, 678 accum_tx->agg.underrun,
749 delta_tx->agg.underrun, max_tx->agg.underrun); 679 delta_tx->agg.underrun, max_tx->agg.underrun);
750 pos += scnprintf(buf + pos, bufsz - pos, 680 pos += scnprintf(buf + pos, bufsz - pos,
751 " %-30s %10u %10u %10u %10u\n", 681 fmt_table, "agg bt_prio_kill:",
752 "agg bt_prio_kill:",
753 le32_to_cpu(tx->agg.bt_prio_kill), 682 le32_to_cpu(tx->agg.bt_prio_kill),
754 accum_tx->agg.bt_prio_kill, 683 accum_tx->agg.bt_prio_kill,
755 delta_tx->agg.bt_prio_kill, 684 delta_tx->agg.bt_prio_kill,
756 max_tx->agg.bt_prio_kill); 685 max_tx->agg.bt_prio_kill);
757 pos += scnprintf(buf + pos, bufsz - pos, 686 pos += scnprintf(buf + pos, bufsz - pos,
758 " %-30s %10u %10u %10u %10u\n", 687 fmt_table, "agg rx_ba_rsp_cnt:",
759 "agg rx_ba_rsp_cnt:",
760 le32_to_cpu(tx->agg.rx_ba_rsp_cnt), 688 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
761 accum_tx->agg.rx_ba_rsp_cnt, 689 accum_tx->agg.rx_ba_rsp_cnt,
762 delta_tx->agg.rx_ba_rsp_cnt, 690 delta_tx->agg.rx_ba_rsp_cnt,
@@ -767,15 +695,15 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
767 "tx power: (1/2 dB step)\n"); 695 "tx power: (1/2 dB step)\n");
768 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a) 696 if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
769 pos += scnprintf(buf + pos, bufsz - pos, 697 pos += scnprintf(buf + pos, bufsz - pos,
770 "\tantenna A: 0x%X\n", 698 fmt_hex, "antenna A:",
771 tx->tx_power.ant_a); 699 tx->tx_power.ant_a);
772 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b) 700 if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
773 pos += scnprintf(buf + pos, bufsz - pos, 701 pos += scnprintf(buf + pos, bufsz - pos,
774 "\tantenna B: 0x%X\n", 702 fmt_hex, "antenna B:",
775 tx->tx_power.ant_b); 703 tx->tx_power.ant_b);
776 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c) 704 if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
777 pos += scnprintf(buf + pos, bufsz - pos, 705 pos += scnprintf(buf + pos, bufsz - pos,
778 "\tantenna C: 0x%X\n", 706 fmt_hex, "antenna C:",
779 tx->tx_power.ant_c); 707 tx->tx_power.ant_c);
780 } 708 }
781 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 709 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -809,7 +737,8 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
809 * the last statistics notification from uCode 737 * the last statistics notification from uCode
810 * might not reflect the current uCode activity 738 * might not reflect the current uCode activity
811 */ 739 */
812 if (priv->cfg->bt_statistics) { 740 if (priv->cfg->bt_params &&
741 priv->cfg->bt_params->bt_statistics) {
813 general = &priv->_agn.statistics_bt.general.common; 742 general = &priv->_agn.statistics_bt.general.common;
814 dbg = &priv->_agn.statistics_bt.general.common.dbg; 743 dbg = &priv->_agn.statistics_bt.general.common.dbg;
815 div = &priv->_agn.statistics_bt.general.common.div; 744 div = &priv->_agn.statistics_bt.general.common.div;
@@ -838,84 +767,72 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
838 } 767 }
839 768
840 pos += iwl_statistics_flag(priv, buf, bufsz); 769 pos += iwl_statistics_flag(priv, buf, bufsz);
841 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" 770 pos += scnprintf(buf + pos, bufsz - pos,
842 "acumulative delta max\n", 771 fmt_header, "Statistics_General:");
843 "Statistics_General:"); 772 pos += scnprintf(buf + pos, bufsz - pos,
844 pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n", 773 fmt_value, "temperature:",
845 "temperature:",
846 le32_to_cpu(general->temperature)); 774 le32_to_cpu(general->temperature));
847 pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n", 775 pos += scnprintf(buf + pos, bufsz - pos,
848 "temperature_m:", 776 fmt_value, "temperature_m:",
849 le32_to_cpu(general->temperature_m)); 777 le32_to_cpu(general->temperature_m));
850 pos += scnprintf(buf + pos, bufsz - pos, 778 pos += scnprintf(buf + pos, bufsz - pos,
851 " %-30s %10u %10u %10u %10u\n", 779 fmt_value, "ttl_timestamp:",
852 "burst_check:", 780 le32_to_cpu(general->ttl_timestamp));
781 pos += scnprintf(buf + pos, bufsz - pos,
782 fmt_table, "burst_check:",
853 le32_to_cpu(dbg->burst_check), 783 le32_to_cpu(dbg->burst_check),
854 accum_dbg->burst_check, 784 accum_dbg->burst_check,
855 delta_dbg->burst_check, max_dbg->burst_check); 785 delta_dbg->burst_check, max_dbg->burst_check);
856 pos += scnprintf(buf + pos, bufsz - pos, 786 pos += scnprintf(buf + pos, bufsz - pos,
857 " %-30s %10u %10u %10u %10u\n", 787 fmt_table, "burst_count:",
858 "burst_count:",
859 le32_to_cpu(dbg->burst_count), 788 le32_to_cpu(dbg->burst_count),
860 accum_dbg->burst_count, 789 accum_dbg->burst_count,
861 delta_dbg->burst_count, max_dbg->burst_count); 790 delta_dbg->burst_count, max_dbg->burst_count);
862 pos += scnprintf(buf + pos, bufsz - pos, 791 pos += scnprintf(buf + pos, bufsz - pos,
863 " %-30s %10u %10u %10u %10u\n", 792 fmt_table, "wait_for_silence_timeout_count:",
864 "wait_for_silence_timeout_count:",
865 le32_to_cpu(dbg->wait_for_silence_timeout_cnt), 793 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
866 accum_dbg->wait_for_silence_timeout_cnt, 794 accum_dbg->wait_for_silence_timeout_cnt,
867 delta_dbg->wait_for_silence_timeout_cnt, 795 delta_dbg->wait_for_silence_timeout_cnt,
868 max_dbg->wait_for_silence_timeout_cnt); 796 max_dbg->wait_for_silence_timeout_cnt);
869 pos += scnprintf(buf + pos, bufsz - pos, 797 pos += scnprintf(buf + pos, bufsz - pos,
870 " %-30s %10u %10u %10u %10u\n", 798 fmt_table, "sleep_time:",
871 "sleep_time:",
872 le32_to_cpu(general->sleep_time), 799 le32_to_cpu(general->sleep_time),
873 accum_general->sleep_time, 800 accum_general->sleep_time,
874 delta_general->sleep_time, max_general->sleep_time); 801 delta_general->sleep_time, max_general->sleep_time);
875 pos += scnprintf(buf + pos, bufsz - pos, 802 pos += scnprintf(buf + pos, bufsz - pos,
876 " %-30s %10u %10u %10u %10u\n", 803 fmt_table, "slots_out:",
877 "slots_out:",
878 le32_to_cpu(general->slots_out), 804 le32_to_cpu(general->slots_out),
879 accum_general->slots_out, 805 accum_general->slots_out,
880 delta_general->slots_out, max_general->slots_out); 806 delta_general->slots_out, max_general->slots_out);
881 pos += scnprintf(buf + pos, bufsz - pos, 807 pos += scnprintf(buf + pos, bufsz - pos,
882 " %-30s %10u %10u %10u %10u\n", 808 fmt_table, "slots_idle:",
883 "slots_idle:",
884 le32_to_cpu(general->slots_idle), 809 le32_to_cpu(general->slots_idle),
885 accum_general->slots_idle, 810 accum_general->slots_idle,
886 delta_general->slots_idle, max_general->slots_idle); 811 delta_general->slots_idle, max_general->slots_idle);
887 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
888 le32_to_cpu(general->ttl_timestamp));
889 pos += scnprintf(buf + pos, bufsz - pos, 812 pos += scnprintf(buf + pos, bufsz - pos,
890 " %-30s %10u %10u %10u %10u\n", 813 fmt_table, "tx_on_a:",
891 "tx_on_a:",
892 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, 814 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
893 delta_div->tx_on_a, max_div->tx_on_a); 815 delta_div->tx_on_a, max_div->tx_on_a);
894 pos += scnprintf(buf + pos, bufsz - pos, 816 pos += scnprintf(buf + pos, bufsz - pos,
895 " %-30s %10u %10u %10u %10u\n", 817 fmt_table, "tx_on_b:",
896 "tx_on_b:",
897 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, 818 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
898 delta_div->tx_on_b, max_div->tx_on_b); 819 delta_div->tx_on_b, max_div->tx_on_b);
899 pos += scnprintf(buf + pos, bufsz - pos, 820 pos += scnprintf(buf + pos, bufsz - pos,
900 " %-30s %10u %10u %10u %10u\n", 821 fmt_table, "exec_time:",
901 "exec_time:",
902 le32_to_cpu(div->exec_time), accum_div->exec_time, 822 le32_to_cpu(div->exec_time), accum_div->exec_time,
903 delta_div->exec_time, max_div->exec_time); 823 delta_div->exec_time, max_div->exec_time);
904 pos += scnprintf(buf + pos, bufsz - pos, 824 pos += scnprintf(buf + pos, bufsz - pos,
905 " %-30s %10u %10u %10u %10u\n", 825 fmt_table, "probe_time:",
906 "probe_time:",
907 le32_to_cpu(div->probe_time), accum_div->probe_time, 826 le32_to_cpu(div->probe_time), accum_div->probe_time,
908 delta_div->probe_time, max_div->probe_time); 827 delta_div->probe_time, max_div->probe_time);
909 pos += scnprintf(buf + pos, bufsz - pos, 828 pos += scnprintf(buf + pos, bufsz - pos,
910 " %-30s %10u %10u %10u %10u\n", 829 fmt_table, "rx_enable_counter:",
911 "rx_enable_counter:",
912 le32_to_cpu(general->rx_enable_counter), 830 le32_to_cpu(general->rx_enable_counter),
913 accum_general->rx_enable_counter, 831 accum_general->rx_enable_counter,
914 delta_general->rx_enable_counter, 832 delta_general->rx_enable_counter,
915 max_general->rx_enable_counter); 833 max_general->rx_enable_counter);
916 pos += scnprintf(buf + pos, bufsz - pos, 834 pos += scnprintf(buf + pos, bufsz - pos,
917 " %-30s %10u %10u %10u %10u\n", 835 fmt_table, "num_of_sos_states:",
918 "num_of_sos_states:",
919 le32_to_cpu(general->num_of_sos_states), 836 le32_to_cpu(general->num_of_sos_states),
920 accum_general->num_of_sos_states, 837 accum_general->num_of_sos_states,
921 delta_general->num_of_sos_states, 838 delta_general->num_of_sos_states,
@@ -1011,3 +928,147 @@ ssize_t iwl_ucode_bt_stats_read(struct file *file,
1011 kfree(buf); 928 kfree(buf);
1012 return ret; 929 return ret;
1013} 930}
931
932ssize_t iwl_reply_tx_error_read(struct file *file,
933 char __user *user_buf,
934 size_t count, loff_t *ppos)
935{
936 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
937 int pos = 0;
938 char *buf;
939 int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
940 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
941 ssize_t ret;
942
943 if (!iwl_is_alive(priv))
944 return -EAGAIN;
945
946 buf = kzalloc(bufsz, GFP_KERNEL);
947 if (!buf) {
948 IWL_ERR(priv, "Can not allocate Buffer\n");
949 return -ENOMEM;
950 }
951
952 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
953 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
954 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
955 priv->_agn.reply_tx_stats.pp_delay);
956 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
957 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
958 priv->_agn.reply_tx_stats.pp_few_bytes);
959 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
960 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
961 priv->_agn.reply_tx_stats.pp_bt_prio);
962 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
963 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
964 priv->_agn.reply_tx_stats.pp_quiet_period);
965 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
966 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
967 priv->_agn.reply_tx_stats.pp_calc_ttak);
968 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
969 iwl_get_tx_fail_reason(
970 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
971 priv->_agn.reply_tx_stats.int_crossed_retry);
972 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
973 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
974 priv->_agn.reply_tx_stats.short_limit);
975 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
976 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
977 priv->_agn.reply_tx_stats.long_limit);
978 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
979 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
980 priv->_agn.reply_tx_stats.fifo_underrun);
981 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
982 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
983 priv->_agn.reply_tx_stats.drain_flow);
984 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
985 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
986 priv->_agn.reply_tx_stats.rfkill_flush);
987 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
988 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
989 priv->_agn.reply_tx_stats.life_expire);
990 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
991 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
992 priv->_agn.reply_tx_stats.dest_ps);
993 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
994 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
995 priv->_agn.reply_tx_stats.host_abort);
996 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
997 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
998 priv->_agn.reply_tx_stats.pp_delay);
999 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1000 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
1001 priv->_agn.reply_tx_stats.sta_invalid);
1002 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1003 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
1004 priv->_agn.reply_tx_stats.frag_drop);
1005 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1006 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
1007 priv->_agn.reply_tx_stats.tid_disable);
1008 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1009 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
1010 priv->_agn.reply_tx_stats.fifo_flush);
1011 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1012 iwl_get_tx_fail_reason(
1013 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
1014 priv->_agn.reply_tx_stats.insuff_cf_poll);
1015 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1016 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
1017 priv->_agn.reply_tx_stats.fail_hw_drop);
1018 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1019 iwl_get_tx_fail_reason(
1020 TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
1021 priv->_agn.reply_tx_stats.sta_color_mismatch);
1022 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1023 priv->_agn.reply_tx_stats.unknown);
1024
1025 pos += scnprintf(buf + pos, bufsz - pos,
1026 "\nStatistics_Agg_TX_Error:\n");
1027
1028 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1029 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
1030 priv->_agn.reply_agg_tx_stats.underrun);
1031 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1032 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
1033 priv->_agn.reply_agg_tx_stats.bt_prio);
1034 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1035 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
1036 priv->_agn.reply_agg_tx_stats.few_bytes);
1037 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1038 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
1039 priv->_agn.reply_agg_tx_stats.abort);
1040 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1041 iwl_get_agg_tx_fail_reason(
1042 AGG_TX_STATE_LAST_SENT_TTL_MSK),
1043 priv->_agn.reply_agg_tx_stats.last_sent_ttl);
1044 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1045 iwl_get_agg_tx_fail_reason(
1046 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
1047 priv->_agn.reply_agg_tx_stats.last_sent_try);
1048 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1049 iwl_get_agg_tx_fail_reason(
1050 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
1051 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill);
1052 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1053 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
1054 priv->_agn.reply_agg_tx_stats.scd_query);
1055 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
1056 iwl_get_agg_tx_fail_reason(
1057 AGG_TX_STATE_TEST_BAD_CRC32_MSK),
1058 priv->_agn.reply_agg_tx_stats.bad_crc32);
1059 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1060 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
1061 priv->_agn.reply_agg_tx_stats.response);
1062 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1063 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
1064 priv->_agn.reply_agg_tx_stats.dump_tx);
1065 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
1066 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
1067 priv->_agn.reply_agg_tx_stats.delay_tx);
1068 pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
1069 priv->_agn.reply_agg_tx_stats.unknown);
1070
1071 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1072 kfree(buf);
1073 return ret;
1074}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
index bbdce5913ac7..f2573b5486cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -39,6 +39,8 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
39 size_t count, loff_t *ppos); 39 size_t count, loff_t *ppos);
40ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf, 40ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
41 size_t count, loff_t *ppos); 41 size_t count, loff_t *ppos);
42ssize_t iwl_reply_tx_error_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos);
42#else 44#else
43static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, 45static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
44 size_t count, loff_t *ppos) 46 size_t count, loff_t *ppos)
@@ -60,4 +62,9 @@ static ssize_t iwl_ucode_bt_stats_read(struct file *file, char __user *user_buf,
60{ 62{
61 return 0; 63 return 0;
62} 64}
65static ssize_t iwl_reply_tx_error_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 return 0;
69}
63#endif 70#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
new file mode 100644
index 000000000000..a650baba0809
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -0,0 +1,454 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-agn.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/**
110 * struct iwl_txpwr_section: eeprom section information
111 * @offset: indirect address into eeprom image
112 * @count: number of "struct iwl_eeprom_enhanced_txpwr" in this section
113 * @band: band type for the section
114 * @is_common - true: common section, false: channel section
115 * @is_cck - true: cck section, false: not cck section
116 * @is_ht_40 - true: all channel in the section are HT40 channel,
117 * false: legacy or HT 20 MHz
118 * ignore if it is common section
119 * @iwl_eeprom_section_channel: channel array in the section,
120 * ignore if common section
121 */
122struct iwl_txpwr_section {
123 u32 offset;
124 u8 count;
125 enum ieee80211_band band;
126 bool is_common;
127 bool is_cck;
128 bool is_ht40;
129 u8 iwl_eeprom_section_channel[EEPROM_MAX_TXPOWER_SECTION_ELEMENTS];
130};
131
132/**
133 * section 1 - 3 are regulatory tx power apply to all channels based on
134 * modulation: CCK, OFDM
135 * Band: 2.4GHz, 5.2GHz
136 * section 4 - 10 are regulatory tx power apply to specified channels
137 * For example:
138 * 1L - Channel 1 Legacy
139 * 1HT - Channel 1 HT
140 * (1,+1) - Channel 1 HT40 "_above_"
141 *
142 * Section 1: all CCK channels
143 * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40) channels
144 * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels
145 * Section 4: 2.4 GHz 20MHz channels: 1L, 1HT, 2L, 2HT, 10L, 10HT, 11L, 11HT
146 * Section 5: 2.4 GHz 40MHz channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1)
147 * Section 6: 5.2 GHz 20MHz channels: 36L, 64L, 100L, 36HT, 64HT, 100HT
148 * Section 7: 5.2 GHz 40MHz channels: (36,+1) (60,+1) (100,+1)
149 * Section 8: 2.4 GHz channel: 13L, 13HT
150 * Section 9: 2.4 GHz channel: 140L, 140HT
151 * Section 10: 2.4 GHz 40MHz channels: (132,+1) (44,+1)
152 *
153 */
154static const struct iwl_txpwr_section enhinfo[] = {
155 { EEPROM_LB_CCK_20_COMMON, 1, IEEE80211_BAND_2GHZ, true, true, false },
156 { EEPROM_LB_OFDM_COMMON, 3, IEEE80211_BAND_2GHZ, true, false, false },
157 { EEPROM_HB_OFDM_COMMON, 3, IEEE80211_BAND_5GHZ, true, false, false },
158 { EEPROM_LB_OFDM_20_BAND, 8, IEEE80211_BAND_2GHZ,
159 false, false, false,
160 {1, 1, 2, 2, 10, 10, 11, 11 } },
161 { EEPROM_LB_OFDM_HT40_BAND, 5, IEEE80211_BAND_2GHZ,
162 false, false, true,
163 { 1, 2, 6, 7, 9 } },
164 { EEPROM_HB_OFDM_20_BAND, 6, IEEE80211_BAND_5GHZ,
165 false, false, false,
166 { 36, 64, 100, 36, 64, 100 } },
167 { EEPROM_HB_OFDM_HT40_BAND, 3, IEEE80211_BAND_5GHZ,
168 false, false, true,
169 { 36, 60, 100 } },
170 { EEPROM_LB_OFDM_20_CHANNEL_13, 2, IEEE80211_BAND_2GHZ,
171 false, false, false,
172 { 13, 13 } },
173 { EEPROM_HB_OFDM_20_CHANNEL_140, 2, IEEE80211_BAND_5GHZ,
174 false, false, false,
175 { 140, 140 } },
176 { EEPROM_HB_OFDM_HT40_BAND_1, 2, IEEE80211_BAND_5GHZ,
177 false, false, true,
178 { 132, 44 } },
179};
180
181/******************************************************************************
182 *
183 * EEPROM related functions
184 *
185******************************************************************************/
186
187/*
188 * The device's EEPROM semaphore prevents conflicts between driver and uCode
189 * when accessing the EEPROM; each access is a series of pulses to/from the
190 * EEPROM chip, not a single event, so even reads could conflict if they
191 * weren't arbitrated by the semaphore.
192 */
193int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
194{
195 u16 count;
196 int ret;
197
198 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
199 /* Request semaphore */
200 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
201 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
202
203 /* See if we got it */
204 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
205 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
206 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
207 EEPROM_SEM_TIMEOUT);
208 if (ret >= 0) {
209 IWL_DEBUG_IO(priv,
210 "Acquired semaphore after %d tries.\n",
211 count+1);
212 return ret;
213 }
214 }
215
216 return ret;
217}
218
219void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
220{
221 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
222 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
223
224}
225
226int iwl_eeprom_check_version(struct iwl_priv *priv)
227{
228 u16 eeprom_ver;
229 u16 calib_ver;
230
231 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
232 calib_ver = priv->cfg->ops->lib->eeprom_ops.calib_version(priv);
233
234 if (eeprom_ver < priv->cfg->eeprom_ver ||
235 calib_ver < priv->cfg->eeprom_calib_ver)
236 goto err;
237
238 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
239 eeprom_ver, calib_ver);
240
241 return 0;
242err:
243 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
244 "CALIB=0x%x < 0x%x\n",
245 eeprom_ver, priv->cfg->eeprom_ver,
246 calib_ver, priv->cfg->eeprom_calib_ver);
247 return -EINVAL;
248
249}
250
251void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
252{
253 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
254 EEPROM_MAC_ADDRESS);
255 memcpy(mac, addr, ETH_ALEN);
256}
257
258/**
259 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
260 * find the highest tx power from all chains for the channel
261 */
262static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
263 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
264 int element, s8 *max_txpower_in_half_dbm)
265{
266 s8 max_txpower_avg = 0; /* (dBm) */
267
268 IWL_DEBUG_INFO(priv, "%d - "
269 "chain_a: %d dB chain_b: %d dB "
270 "chain_c: %d dB mimo2: %d dB mimo3: %d dB\n",
271 element,
272 enhanced_txpower[element].chain_a_max >> 1,
273 enhanced_txpower[element].chain_b_max >> 1,
274 enhanced_txpower[element].chain_c_max >> 1,
275 enhanced_txpower[element].mimo2_max >> 1,
276 enhanced_txpower[element].mimo3_max >> 1);
277 /* Take the highest tx power from any valid chains */
278 if ((priv->cfg->valid_tx_ant & ANT_A) &&
279 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
280 max_txpower_avg = enhanced_txpower[element].chain_a_max;
281 if ((priv->cfg->valid_tx_ant & ANT_B) &&
282 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
283 max_txpower_avg = enhanced_txpower[element].chain_b_max;
284 if ((priv->cfg->valid_tx_ant & ANT_C) &&
285 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
286 max_txpower_avg = enhanced_txpower[element].chain_c_max;
287 if (((priv->cfg->valid_tx_ant == ANT_AB) |
288 (priv->cfg->valid_tx_ant == ANT_BC) |
289 (priv->cfg->valid_tx_ant == ANT_AC)) &&
290 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
291 max_txpower_avg = enhanced_txpower[element].mimo2_max;
292 if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
293 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
294 max_txpower_avg = enhanced_txpower[element].mimo3_max;
295
296 /*
297 * max. tx power in EEPROM is in 1/2 dBm format
298 * convert from 1/2 dBm to dBm (round-up convert)
299 * but we also do not want to loss 1/2 dBm resolution which
300 * will impact performance
301 */
302 *max_txpower_in_half_dbm = max_txpower_avg;
303 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
304}
305
306/**
307 * iwl_update_common_txpower: update channel tx power
308 * update tx power per band based on EEPROM enhanced tx power info.
309 */
310static s8 iwl_update_common_txpower(struct iwl_priv *priv,
311 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
312 int section, int element, s8 *max_txpower_in_half_dbm)
313{
314 struct iwl_channel_info *ch_info;
315 int ch;
316 bool is_ht40 = false;
317 s8 max_txpower_avg; /* (dBm) */
318
319 /* it is common section, contain all type (Legacy, HT and HT40)
320 * based on the element in the section to determine
321 * is it HT 40 or not
322 */
323 if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX)
324 is_ht40 = true;
325 max_txpower_avg =
326 iwl_get_max_txpower_avg(priv, enhanced_txpower,
327 element, max_txpower_in_half_dbm);
328
329 ch_info = priv->channel_info;
330
331 for (ch = 0; ch < priv->channel_count; ch++) {
332 /* find matching band and update tx power if needed */
333 if ((ch_info->band == enhinfo[section].band) &&
334 (ch_info->max_power_avg < max_txpower_avg) &&
335 (!is_ht40)) {
336 /* Update regulatory-based run-time data */
337 ch_info->max_power_avg = ch_info->curr_txpow =
338 max_txpower_avg;
339 ch_info->scan_power = max_txpower_avg;
340 }
341 if ((ch_info->band == enhinfo[section].band) && is_ht40 &&
342 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
343 /* Update regulatory-based run-time data */
344 ch_info->ht40_max_power_avg = max_txpower_avg;
345 }
346 ch_info++;
347 }
348 return max_txpower_avg;
349}
350
351/**
352 * iwl_update_channel_txpower: update channel tx power
353 * update channel tx power based on EEPROM enhanced tx power info.
354 */
355static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
356 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
357 int section, int element, s8 *max_txpower_in_half_dbm)
358{
359 struct iwl_channel_info *ch_info;
360 int ch;
361 u8 channel;
362 s8 max_txpower_avg; /* (dBm) */
363
364 channel = enhinfo[section].iwl_eeprom_section_channel[element];
365 max_txpower_avg =
366 iwl_get_max_txpower_avg(priv, enhanced_txpower,
367 element, max_txpower_in_half_dbm);
368
369 ch_info = priv->channel_info;
370 for (ch = 0; ch < priv->channel_count; ch++) {
371 /* find matching channel and update tx power if needed */
372 if (ch_info->channel == channel) {
373 if ((ch_info->max_power_avg < max_txpower_avg) &&
374 (!enhinfo[section].is_ht40)) {
375 /* Update regulatory-based run-time data */
376 ch_info->max_power_avg = max_txpower_avg;
377 ch_info->curr_txpow = max_txpower_avg;
378 ch_info->scan_power = max_txpower_avg;
379 }
380 if ((enhinfo[section].is_ht40) &&
381 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
382 /* Update regulatory-based run-time data */
383 ch_info->ht40_max_power_avg = max_txpower_avg;
384 }
385 break;
386 }
387 ch_info++;
388 }
389 return max_txpower_avg;
390}
391
392/**
393 * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info
394 */
395void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
396{
397 int eeprom_section_count = 0;
398 int section, element;
399 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower;
400 u32 offset;
401 s8 max_txpower_avg; /* (dBm) */
402 s8 max_txpower_in_half_dbm; /* (half-dBm) */
403
404 /* Loop through all the sections
405 * adjust bands and channel's max tx power
406 * Set the tx_power_user_lmt to the highest power
407 * supported by any channels and chains
408 */
409 for (section = 0; section < ARRAY_SIZE(enhinfo); section++) {
410 eeprom_section_count = enhinfo[section].count;
411 offset = enhinfo[section].offset;
412 enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *)
413 iwl_eeprom_query_addr(priv, offset);
414
415 /*
416 * check for valid entry -
417 * different version of EEPROM might contain different set
418 * of enhanced tx power table
419 * always check for valid entry before process
420 * the information
421 */
422 if (!enhanced_txpower->common || enhanced_txpower->reserved)
423 continue;
424
425 for (element = 0; element < eeprom_section_count; element++) {
426 if (enhinfo[section].is_common)
427 max_txpower_avg =
428 iwl_update_common_txpower(priv,
429 enhanced_txpower, section,
430 element,
431 &max_txpower_in_half_dbm);
432 else
433 max_txpower_avg =
434 iwl_update_channel_txpower(priv,
435 enhanced_txpower, section,
436 element,
437 &max_txpower_in_half_dbm);
438
439 /* Update the tx_power_user_lmt to the highest power
440 * supported by any channel */
441 if (max_txpower_avg > priv->tx_power_user_lmt)
442 priv->tx_power_user_lmt = max_txpower_avg;
443
444 /*
445 * Update the tx_power_lmt_in_half_dbm to
446 * the highest power supported by any channel
447 */
448 if (max_txpower_in_half_dbm >
449 priv->tx_power_lmt_in_half_dbm)
450 priv->tx_power_lmt_in_half_dbm =
451 max_txpower_in_half_dbm;
452 }
453 }
454}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 6fb52abafc8d..ffb2f4111ad0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -137,7 +137,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
137 continue; 137 continue;
138 } 138 }
139 139
140 delta_g = (priv->cfg->chain_noise_scale * 140 delta_g = (priv->cfg->base_params->chain_noise_scale *
141 ((s32)average_noise[default_chain] - 141 ((s32)average_noise[default_chain] -
142 (s32)average_noise[i])) / 1500; 142 (s32)average_noise[i])) / 1500;
143 143
@@ -222,7 +222,8 @@ static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
222 return; 222 return;
223 } 223 }
224 224
225 if (priv->cfg->use_rts_for_aggregation && 225 if (priv->cfg->ht_params &&
226 priv->cfg->ht_params->use_rts_for_aggregation &&
226 info->flags & IEEE80211_TX_CTL_AMPDU) { 227 info->flags & IEEE80211_TX_CTL_AMPDU) {
227 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK; 228 *tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
228 return; 229 return;
@@ -287,6 +288,15 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
287 ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; 288 ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
288 ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; 289 ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
289 290
291 /*
292 * If the PAN context is inactive, then we don't need
293 * to update the PAN parameters, the last thing we'll
294 * have done before it goes inactive is making the PAN
295 * parameters be WLAN-only.
296 */
297 if (!ctx_pan->is_active)
298 return 0;
299
290 memset(&cmd, 0, sizeof(cmd)); 300 memset(&cmd, 0, sizeof(cmd));
291 301
292 /* only 2 slots are currently allowed */ 302 /* only 2 slots are currently allowed */
@@ -312,7 +322,7 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
312 bcnint = max_t(int, bcnint, 322 bcnint = max_t(int, bcnint,
313 ctx_bss->vif->bss_conf.beacon_int); 323 ctx_bss->vif->bss_conf.beacon_int);
314 if (!bcnint) 324 if (!bcnint)
315 bcnint = 100; 325 bcnint = DEFAULT_BEACON_INTERVAL;
316 slot0 = bcnint / 2; 326 slot0 = bcnint / 2;
317 slot1 = bcnint - slot0; 327 slot1 = bcnint - slot0;
318 328
@@ -330,7 +340,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
330 slot0 = 0; 340 slot0 = 0;
331 slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) * 341 slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
332 ctx_pan->vif->bss_conf.beacon_int; 342 ctx_pan->vif->bss_conf.beacon_int;
333 slot1 = max_t(int, 100, slot1); 343 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
344
345 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
346 slot0 = slot1 * 3 - 20;
347 slot1 = 20;
348 }
334 } 349 }
335 350
336 cmd.slots[0].width = cpu_to_le16(slot0); 351 cmd.slots[0].width = cpu_to_le16(slot0);
@@ -345,8 +360,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
345 360
346struct iwl_hcmd_ops iwlagn_hcmd = { 361struct iwl_hcmd_ops iwlagn_hcmd = {
347 .rxon_assoc = iwlagn_send_rxon_assoc, 362 .rxon_assoc = iwlagn_send_rxon_assoc,
348 .commit_rxon = iwl_commit_rxon, 363 .commit_rxon = iwlagn_commit_rxon,
349 .set_rxon_chain = iwl_set_rxon_chain, 364 .set_rxon_chain = iwlagn_set_rxon_chain,
350 .set_tx_ant = iwlagn_send_tx_ant_config, 365 .set_tx_ant = iwlagn_send_tx_ant_config,
351 .send_bt_config = iwl_send_bt_config, 366 .send_bt_config = iwl_send_bt_config,
352 .set_pan_params = iwlagn_set_pan_params, 367 .set_pan_params = iwlagn_set_pan_params,
@@ -354,8 +369,8 @@ struct iwl_hcmd_ops iwlagn_hcmd = {
354 369
355struct iwl_hcmd_ops iwlagn_bt_hcmd = { 370struct iwl_hcmd_ops iwlagn_bt_hcmd = {
356 .rxon_assoc = iwlagn_send_rxon_assoc, 371 .rxon_assoc = iwlagn_send_rxon_assoc,
357 .commit_rxon = iwl_commit_rxon, 372 .commit_rxon = iwlagn_commit_rxon,
358 .set_rxon_chain = iwl_set_rxon_chain, 373 .set_rxon_chain = iwlagn_set_rxon_chain,
359 .set_tx_ant = iwlagn_send_tx_ant_config, 374 .set_tx_ant = iwlagn_send_tx_ant_config,
360 .send_bt_config = iwlagn_send_advance_bt_config, 375 .send_bt_config = iwlagn_send_advance_bt_config,
361 .set_pan_params = iwlagn_set_pan_params, 376 .set_pan_params = iwlagn_set_pan_params,
@@ -369,4 +384,5 @@ struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
369 .tx_cmd_protection = iwlagn_tx_cmd_protection, 384 .tx_cmd_protection = iwlagn_tx_cmd_protection,
370 .calc_rssi = iwlagn_calc_rssi, 385 .calc_rssi = iwlagn_calc_rssi,
371 .request_scan = iwlagn_request_scan, 386 .request_scan = iwlagn_request_scan,
387 .post_scan = iwlagn_post_scan,
372}; 388};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index c92b2c0cbd91..a5dbfea1bfad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -59,7 +59,7 @@ void iwl_free_isr_ict(struct iwl_priv *priv)
59int iwl_alloc_isr_ict(struct iwl_priv *priv) 59int iwl_alloc_isr_ict(struct iwl_priv *priv)
60{ 60{
61 61
62 if (priv->cfg->use_isr_legacy) 62 if (priv->cfg->base_params->use_isr_legacy)
63 return 0; 63 return 0;
64 /* allocate shrared data table */ 64 /* allocate shrared data table */
65 priv->_agn.ict_tbl_vir = 65 priv->_agn.ict_tbl_vir =
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index a8f2adfd799e..b555edd53354 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -40,22 +40,195 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-sta.h" 41#include "iwl-sta.h"
42 42
43static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp) 43static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
44{ 44{
45 return le32_to_cpup((__le32 *)&tx_resp->status + 45 return le32_to_cpup((__le32 *)&tx_resp->status +
46 tx_resp->frame_count) & MAX_SN; 46 tx_resp->frame_count) & MAX_SN;
47} 47}
48 48
49static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
50{
51 status &= TX_STATUS_MSK;
52
53 switch (status) {
54 case TX_STATUS_POSTPONE_DELAY:
55 priv->_agn.reply_tx_stats.pp_delay++;
56 break;
57 case TX_STATUS_POSTPONE_FEW_BYTES:
58 priv->_agn.reply_tx_stats.pp_few_bytes++;
59 break;
60 case TX_STATUS_POSTPONE_BT_PRIO:
61 priv->_agn.reply_tx_stats.pp_bt_prio++;
62 break;
63 case TX_STATUS_POSTPONE_QUIET_PERIOD:
64 priv->_agn.reply_tx_stats.pp_quiet_period++;
65 break;
66 case TX_STATUS_POSTPONE_CALC_TTAK:
67 priv->_agn.reply_tx_stats.pp_calc_ttak++;
68 break;
69 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
70 priv->_agn.reply_tx_stats.int_crossed_retry++;
71 break;
72 case TX_STATUS_FAIL_SHORT_LIMIT:
73 priv->_agn.reply_tx_stats.short_limit++;
74 break;
75 case TX_STATUS_FAIL_LONG_LIMIT:
76 priv->_agn.reply_tx_stats.long_limit++;
77 break;
78 case TX_STATUS_FAIL_FIFO_UNDERRUN:
79 priv->_agn.reply_tx_stats.fifo_underrun++;
80 break;
81 case TX_STATUS_FAIL_DRAIN_FLOW:
82 priv->_agn.reply_tx_stats.drain_flow++;
83 break;
84 case TX_STATUS_FAIL_RFKILL_FLUSH:
85 priv->_agn.reply_tx_stats.rfkill_flush++;
86 break;
87 case TX_STATUS_FAIL_LIFE_EXPIRE:
88 priv->_agn.reply_tx_stats.life_expire++;
89 break;
90 case TX_STATUS_FAIL_DEST_PS:
91 priv->_agn.reply_tx_stats.dest_ps++;
92 break;
93 case TX_STATUS_FAIL_HOST_ABORTED:
94 priv->_agn.reply_tx_stats.host_abort++;
95 break;
96 case TX_STATUS_FAIL_BT_RETRY:
97 priv->_agn.reply_tx_stats.bt_retry++;
98 break;
99 case TX_STATUS_FAIL_STA_INVALID:
100 priv->_agn.reply_tx_stats.sta_invalid++;
101 break;
102 case TX_STATUS_FAIL_FRAG_DROPPED:
103 priv->_agn.reply_tx_stats.frag_drop++;
104 break;
105 case TX_STATUS_FAIL_TID_DISABLE:
106 priv->_agn.reply_tx_stats.tid_disable++;
107 break;
108 case TX_STATUS_FAIL_FIFO_FLUSHED:
109 priv->_agn.reply_tx_stats.fifo_flush++;
110 break;
111 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
112 priv->_agn.reply_tx_stats.insuff_cf_poll++;
113 break;
114 case TX_STATUS_FAIL_PASSIVE_NO_RX:
115 priv->_agn.reply_tx_stats.fail_hw_drop++;
116 break;
117 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
118 priv->_agn.reply_tx_stats.sta_color_mismatch++;
119 break;
120 default:
121 priv->_agn.reply_tx_stats.unknown++;
122 break;
123 }
124}
125
126static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
127{
128 status &= AGG_TX_STATUS_MSK;
129
130 switch (status) {
131 case AGG_TX_STATE_UNDERRUN_MSK:
132 priv->_agn.reply_agg_tx_stats.underrun++;
133 break;
134 case AGG_TX_STATE_BT_PRIO_MSK:
135 priv->_agn.reply_agg_tx_stats.bt_prio++;
136 break;
137 case AGG_TX_STATE_FEW_BYTES_MSK:
138 priv->_agn.reply_agg_tx_stats.few_bytes++;
139 break;
140 case AGG_TX_STATE_ABORT_MSK:
141 priv->_agn.reply_agg_tx_stats.abort++;
142 break;
143 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
144 priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
145 break;
146 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
147 priv->_agn.reply_agg_tx_stats.last_sent_try++;
148 break;
149 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
150 priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
151 break;
152 case AGG_TX_STATE_SCD_QUERY_MSK:
153 priv->_agn.reply_agg_tx_stats.scd_query++;
154 break;
155 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
156 priv->_agn.reply_agg_tx_stats.bad_crc32++;
157 break;
158 case AGG_TX_STATE_RESPONSE_MSK:
159 priv->_agn.reply_agg_tx_stats.response++;
160 break;
161 case AGG_TX_STATE_DUMP_TX_MSK:
162 priv->_agn.reply_agg_tx_stats.dump_tx++;
163 break;
164 case AGG_TX_STATE_DELAY_TX_MSK:
165 priv->_agn.reply_agg_tx_stats.delay_tx++;
166 break;
167 default:
168 priv->_agn.reply_agg_tx_stats.unknown++;
169 break;
170 }
171}
172
173static void iwlagn_set_tx_status(struct iwl_priv *priv,
174 struct ieee80211_tx_info *info,
175 struct iwlagn_tx_resp *tx_resp,
176 int txq_id, bool is_agg)
177{
178 u16 status = le16_to_cpu(tx_resp->status.status);
179
180 info->status.rates[0].count = tx_resp->failure_frame + 1;
181 if (is_agg)
182 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
183 info->flags |= iwl_tx_status_to_mac80211(status);
184 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
185 info);
186 if (!iwl_is_tx_success(status))
187 iwlagn_count_tx_err_status(priv, status);
188
189 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
190 "0x%x retries %d\n",
191 txq_id,
192 iwl_get_tx_fail_reason(status), status,
193 le32_to_cpu(tx_resp->rate_n_flags),
194 tx_resp->failure_frame);
195}
196
197#ifdef CONFIG_IWLWIFI_DEBUG
198#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
199
200const char *iwl_get_agg_tx_fail_reason(u16 status)
201{
202 status &= AGG_TX_STATUS_MSK;
203 switch (status) {
204 case AGG_TX_STATE_TRANSMITTED:
205 return "SUCCESS";
206 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
207 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
208 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
209 AGG_TX_STATE_FAIL(ABORT_MSK);
210 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
211 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
212 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
213 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
214 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
215 AGG_TX_STATE_FAIL(RESPONSE_MSK);
216 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
217 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
218 }
219
220 return "UNKNOWN";
221}
222#endif /* CONFIG_IWLWIFI_DEBUG */
223
49static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv, 224static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
50 struct iwl_ht_agg *agg, 225 struct iwl_ht_agg *agg,
51 struct iwl5000_tx_resp *tx_resp, 226 struct iwlagn_tx_resp *tx_resp,
52 int txq_id, u16 start_idx) 227 int txq_id, u16 start_idx)
53{ 228{
54 u16 status; 229 u16 status;
55 struct agg_tx_status *frame_status = &tx_resp->status; 230 struct agg_tx_status *frame_status = &tx_resp->status;
56 struct ieee80211_tx_info *info = NULL;
57 struct ieee80211_hdr *hdr = NULL; 231 struct ieee80211_hdr *hdr = NULL;
58 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
59 int i, sh, idx; 232 int i, sh, idx;
60 u16 seq; 233 u16 seq;
61 234
@@ -64,31 +237,20 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
64 237
65 agg->frame_count = tx_resp->frame_count; 238 agg->frame_count = tx_resp->frame_count;
66 agg->start_idx = start_idx; 239 agg->start_idx = start_idx;
67 agg->rate_n_flags = rate_n_flags; 240 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
68 agg->bitmap = 0; 241 agg->bitmap = 0;
69 242
70 /* # frames attempted by Tx command */ 243 /* # frames attempted by Tx command */
71 if (agg->frame_count == 1) { 244 if (agg->frame_count == 1) {
72 /* Only one frame was attempted; no block-ack will arrive */ 245 /* Only one frame was attempted; no block-ack will arrive */
73 status = le16_to_cpu(frame_status[0].status);
74 idx = start_idx; 246 idx = start_idx;
75 247
76 /* FIXME: code repetition */
77 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", 248 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
78 agg->frame_count, agg->start_idx, idx); 249 agg->frame_count, agg->start_idx, idx);
79 250 iwlagn_set_tx_status(priv,
80 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); 251 IEEE80211_SKB_CB(
81 info->status.rates[0].count = tx_resp->failure_frame + 1; 252 priv->txq[txq_id].txb[idx].skb),
82 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 253 tx_resp, txq_id, true);
83 info->flags |= iwl_tx_status_to_mac80211(status);
84 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
85
86 /* FIXME: code repetition end */
87
88 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
89 status & 0xff, tx_resp->failure_frame);
90 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
91
92 agg->wait_for_ba = 0; 254 agg->wait_for_ba = 0;
93 } else { 255 } else {
94 /* Two or more frames were attempted; expect block-ack */ 256 /* Two or more frames were attempted; expect block-ack */
@@ -109,12 +271,20 @@ static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
109 idx = SEQ_TO_INDEX(seq); 271 idx = SEQ_TO_INDEX(seq);
110 txq_id = SEQ_TO_QUEUE(seq); 272 txq_id = SEQ_TO_QUEUE(seq);
111 273
274 if (status & AGG_TX_STATUS_MSK)
275 iwlagn_count_agg_tx_err_status(priv, status);
276
112 if (status & (AGG_TX_STATE_FEW_BYTES_MSK | 277 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
113 AGG_TX_STATE_ABORT_MSK)) 278 AGG_TX_STATE_ABORT_MSK))
114 continue; 279 continue;
115 280
116 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 281 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
117 agg->frame_count, txq_id, idx); 282 agg->frame_count, txq_id, idx);
283 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
284 "try-count (0x%08x)\n",
285 iwl_get_agg_tx_fail_reason(status),
286 status & AGG_TX_STATUS_MSK,
287 status & AGG_TX_TRY_MSK);
118 288
119 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 289 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
120 if (!hdr) { 290 if (!hdr) {
@@ -220,7 +390,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
220 int index = SEQ_TO_INDEX(sequence); 390 int index = SEQ_TO_INDEX(sequence);
221 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 391 struct iwl_tx_queue *txq = &priv->txq[txq_id];
222 struct ieee80211_tx_info *info; 392 struct ieee80211_tx_info *info;
223 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 393 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
224 u32 status = le16_to_cpu(tx_resp->status.status); 394 u32 status = le16_to_cpu(tx_resp->status.status);
225 int tid; 395 int tid;
226 int sta_id; 396 int sta_id;
@@ -238,8 +408,10 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
238 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 408 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
239 memset(&info->status, 0, sizeof(info->status)); 409 memset(&info->status, 0, sizeof(info->status));
240 410
241 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS; 411 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
242 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS; 412 IWLAGN_TX_RES_TID_POS;
413 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
414 IWLAGN_TX_RES_RA_POS;
243 415
244 spin_lock_irqsave(&priv->sta_lock, flags); 416 spin_lock_irqsave(&priv->sta_lock, flags);
245 if (txq->sched_retry) { 417 if (txq->sched_retry) {
@@ -252,7 +424,8 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
252 * notification again. 424 * notification again.
253 */ 425 */
254 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && 426 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
255 priv->cfg->advanced_bt_coexist) { 427 priv->cfg->bt_params &&
428 priv->cfg->bt_params->advanced_bt_coexist) {
256 IWL_WARN(priv, "receive reply tx with bt_kill\n"); 429 IWL_WARN(priv, "receive reply tx with bt_kill\n");
257 } 430 }
258 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 431 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
@@ -281,20 +454,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
281 } 454 }
282 } else { 455 } else {
283 BUG_ON(txq_id != txq->swq_id); 456 BUG_ON(txq_id != txq->swq_id);
284 457 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
285 info->status.rates[0].count = tx_resp->failure_frame + 1;
286 info->flags |= iwl_tx_status_to_mac80211(status);
287 iwlagn_hwrate_to_tx_control(priv,
288 le32_to_cpu(tx_resp->rate_n_flags),
289 info);
290
291 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
292 "0x%x retries %d\n",
293 txq_id,
294 iwl_get_tx_fail_reason(status), status,
295 le32_to_cpu(tx_resp->rate_n_flags),
296 tx_resp->failure_frame);
297
298 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 458 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
299 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 459 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
300 460
@@ -333,7 +493,7 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr)
333 493
334int iwlagn_send_tx_power(struct iwl_priv *priv) 494int iwlagn_send_tx_power(struct iwl_priv *priv)
335{ 495{
336 struct iwl5000_tx_power_dbm_cmd tx_power_cmd; 496 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
337 u8 tx_ant_cfg_cmd; 497 u8 tx_ant_cfg_cmd;
338 498
339 /* half dBm need to multiply */ 499 /* half dBm need to multiply */
@@ -354,8 +514,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
354 */ 514 */
355 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm; 515 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
356 } 516 }
357 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; 517 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
358 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; 518 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
359 519
360 if (IWL_UCODE_API(priv->ucode_ver) == 1) 520 if (IWL_UCODE_API(priv->ucode_ver) == 1)
361 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1; 521 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
@@ -432,7 +592,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
432 size_t offset) 592 size_t offset)
433{ 593{
434 u32 address = eeprom_indirect_address(priv, offset); 594 u32 address = eeprom_indirect_address(priv, offset);
435 BUG_ON(address >= priv->cfg->eeprom_size); 595 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
436 return &priv->eeprom[address]; 596 return &priv->eeprom[address];
437} 597}
438 598
@@ -480,7 +640,7 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
480 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 640 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
481 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */ 641 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
482 642
483 if (!priv->cfg->use_isr_legacy) 643 if (!priv->cfg->base_params->use_isr_legacy)
484 rb_timeout = RX_RB_TIMEOUT; 644 rb_timeout = RX_RB_TIMEOUT;
485 645
486 if (priv->cfg->mod_params->amsdu_size_8K) 646 if (priv->cfg->mod_params->amsdu_size_8K)
@@ -525,6 +685,23 @@ int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
525 return 0; 685 return 0;
526} 686}
527 687
688static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
689{
690/*
691 * (for documentation purposes)
692 * to set power to V_AUX, do:
693
694 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
695 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
696 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
697 ~APMG_PS_CTRL_MSK_PWR_SRC);
698 */
699
700 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
701 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
702 ~APMG_PS_CTRL_MSK_PWR_SRC);
703}
704
528int iwlagn_hw_nic_init(struct iwl_priv *priv) 705int iwlagn_hw_nic_init(struct iwl_priv *priv)
529{ 706{
530 unsigned long flags; 707 unsigned long flags;
@@ -540,7 +717,7 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
540 717
541 spin_unlock_irqrestore(&priv->lock, flags); 718 spin_unlock_irqrestore(&priv->lock, flags);
542 719
543 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 720 iwlagn_set_pwr_vmain(priv);
544 721
545 priv->cfg->ops->lib->apm_ops.config(priv); 722 priv->cfg->ops->lib->apm_ops.config(priv);
546 723
@@ -1154,7 +1331,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1154 return added; 1331 return added;
1155} 1332}
1156 1333
1157void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 1334int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1158{ 1335{
1159 struct iwl_host_cmd cmd = { 1336 struct iwl_host_cmd cmd = {
1160 .id = REPLY_SCAN_CMD, 1337 .id = REPLY_SCAN_CMD,
@@ -1162,7 +1339,6 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1162 .flags = CMD_SIZE_HUGE, 1339 .flags = CMD_SIZE_HUGE,
1163 }; 1340 };
1164 struct iwl_scan_cmd *scan; 1341 struct iwl_scan_cmd *scan;
1165 struct ieee80211_conf *conf = NULL;
1166 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1342 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1167 u32 rate_flags = 0; 1343 u32 rate_flags = 0;
1168 u16 cmd_len; 1344 u16 cmd_len;
@@ -1175,59 +1351,20 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1175 int chan_mod; 1351 int chan_mod;
1176 u8 active_chains; 1352 u8 active_chains;
1177 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; 1353 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
1354 int ret;
1355
1356 lockdep_assert_held(&priv->mutex);
1178 1357
1179 if (vif) 1358 if (vif)
1180 ctx = iwl_rxon_ctx_from_vif(vif); 1359 ctx = iwl_rxon_ctx_from_vif(vif);
1181 1360
1182 conf = ieee80211_get_hw_conf(priv->hw);
1183
1184 cancel_delayed_work(&priv->scan_check);
1185
1186 if (!iwl_is_ready(priv)) {
1187 IWL_WARN(priv, "request scan called when driver not ready.\n");
1188 goto done;
1189 }
1190
1191 /* Make sure the scan wasn't canceled before this queued work
1192 * was given the chance to run... */
1193 if (!test_bit(STATUS_SCANNING, &priv->status))
1194 goto done;
1195
1196 /* This should never be called or scheduled if there is currently
1197 * a scan active in the hardware. */
1198 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
1199 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
1200 "Ignoring second request.\n");
1201 goto done;
1202 }
1203
1204 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
1205 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
1206 goto done;
1207 }
1208
1209 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1210 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
1211 goto done;
1212 }
1213
1214 if (iwl_is_rfkill(priv)) {
1215 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
1216 goto done;
1217 }
1218
1219 if (!test_bit(STATUS_READY, &priv->status)) {
1220 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
1221 goto done;
1222 }
1223
1224 if (!priv->scan_cmd) { 1361 if (!priv->scan_cmd) {
1225 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + 1362 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
1226 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 1363 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
1227 if (!priv->scan_cmd) { 1364 if (!priv->scan_cmd) {
1228 IWL_DEBUG_SCAN(priv, 1365 IWL_DEBUG_SCAN(priv,
1229 "fail to allocate memory for scan\n"); 1366 "fail to allocate memory for scan\n");
1230 goto done; 1367 return -ENOMEM;
1231 } 1368 }
1232 } 1369 }
1233 scan = priv->scan_cmd; 1370 scan = priv->scan_cmd;
@@ -1307,37 +1444,38 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1307 * Internal scans are passive, so we can indiscriminately set 1444 * Internal scans are passive, so we can indiscriminately set
1308 * the BT ignore flag on 2.4 GHz since it applies to TX only. 1445 * the BT ignore flag on 2.4 GHz since it applies to TX only.
1309 */ 1446 */
1310 if (priv->cfg->advanced_bt_coexist) 1447 if (priv->cfg->bt_params &&
1448 priv->cfg->bt_params->advanced_bt_coexist)
1311 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; 1449 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
1312 scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
1313 break; 1450 break;
1314 case IEEE80211_BAND_5GHZ: 1451 case IEEE80211_BAND_5GHZ:
1315 rate = IWL_RATE_6M_PLCP; 1452 rate = IWL_RATE_6M_PLCP;
1316 /*
1317 * If active scanning is requested but a certain channel is
1318 * marked passive, we can do active scanning if we detect
1319 * transmissions.
1320 *
1321 * There is an issue with some firmware versions that triggers
1322 * a sysassert on a "good CRC threshold" of zero (== disabled),
1323 * on a radar channel even though this means that we should NOT
1324 * send probes.
1325 *
1326 * The "good CRC threshold" is the number of frames that we
1327 * need to receive during our dwell time on a channel before
1328 * sending out probes -- setting this to a huge value will
1329 * mean we never reach it, but at the same time work around
1330 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1331 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1332 */
1333 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1334 IWL_GOOD_CRC_TH_NEVER;
1335 break; 1453 break;
1336 default: 1454 default:
1337 IWL_WARN(priv, "Invalid scan band count\n"); 1455 IWL_WARN(priv, "Invalid scan band\n");
1338 goto done; 1456 return -EIO;
1339 } 1457 }
1340 1458
1459 /*
1460 * If active scanning is requested but a certain channel is
1461 * marked passive, we can do active scanning if we detect
1462 * transmissions.
1463 *
1464 * There is an issue with some firmware versions that triggers
1465 * a sysassert on a "good CRC threshold" of zero (== disabled),
1466 * on a radar channel even though this means that we should NOT
1467 * send probes.
1468 *
1469 * The "good CRC threshold" is the number of frames that we
1470 * need to receive during our dwell time on a channel before
1471 * sending out probes -- setting this to a huge value will
1472 * mean we never reach it, but at the same time work around
1473 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
1474 * here instead of IWL_GOOD_CRC_TH_DISABLED.
1475 */
1476 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
1477 IWL_GOOD_CRC_TH_NEVER;
1478
1341 band = priv->scan_band; 1479 band = priv->scan_band;
1342 1480
1343 if (priv->cfg->scan_rx_antennas[band]) 1481 if (priv->cfg->scan_rx_antennas[band])
@@ -1346,10 +1484,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1346 if (priv->cfg->scan_tx_antennas[band]) 1484 if (priv->cfg->scan_tx_antennas[band])
1347 scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; 1485 scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
1348 1486
1349 if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) { 1487 if (priv->cfg->bt_params &&
1488 priv->cfg->bt_params->advanced_bt_coexist &&
1489 priv->bt_full_concurrent) {
1350 /* operated as 1x1 in full concurrency mode */ 1490 /* operated as 1x1 in full concurrency mode */
1351 scan_tx_antennas = 1491 scan_tx_antennas = first_antenna(
1352 first_antenna(priv->cfg->scan_tx_antennas[band]); 1492 priv->cfg->scan_tx_antennas[band]);
1353 } 1493 }
1354 1494
1355 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band], 1495 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
@@ -1370,7 +1510,9 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1370 1510
1371 rx_ant = first_antenna(active_chains); 1511 rx_ant = first_antenna(active_chains);
1372 } 1512 }
1373 if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) { 1513 if (priv->cfg->bt_params &&
1514 priv->cfg->bt_params->advanced_bt_coexist &&
1515 priv->bt_full_concurrent) {
1374 /* operated as 1x1 in full concurrency mode */ 1516 /* operated as 1x1 in full concurrency mode */
1375 rx_ant = first_antenna(rx_ant); 1517 rx_ant = first_antenna(rx_ant);
1376 } 1518 }
@@ -1415,7 +1557,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1415 } 1557 }
1416 if (scan->channel_count == 0) { 1558 if (scan->channel_count == 0) {
1417 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 1559 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
1418 goto done; 1560 return -EIO;
1419 } 1561 }
1420 1562
1421 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 1563 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
@@ -1423,30 +1565,39 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1423 cmd.data = scan; 1565 cmd.data = scan;
1424 scan->len = cpu_to_le16(cmd.len); 1566 scan->len = cpu_to_le16(cmd.len);
1425 1567
1568 /* set scan bit here for PAN params */
1426 set_bit(STATUS_SCAN_HW, &priv->status); 1569 set_bit(STATUS_SCAN_HW, &priv->status);
1427 1570
1428 if (priv->cfg->ops->hcmd->set_pan_params && 1571 if (priv->cfg->ops->hcmd->set_pan_params) {
1429 priv->cfg->ops->hcmd->set_pan_params(priv)) 1572 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
1430 goto done; 1573 if (ret)
1574 return ret;
1575 }
1431 1576
1432 if (iwl_send_cmd_sync(priv, &cmd)) 1577 ret = iwl_send_cmd_sync(priv, &cmd);
1433 goto done; 1578 if (ret) {
1579 clear_bit(STATUS_SCAN_HW, &priv->status);
1580 if (priv->cfg->ops->hcmd->set_pan_params)
1581 priv->cfg->ops->hcmd->set_pan_params(priv);
1582 }
1583
1584 return ret;
1585}
1586
1587void iwlagn_post_scan(struct iwl_priv *priv)
1588{
1589 struct iwl_rxon_context *ctx;
1590
1591 /*
1592 * Since setting the RXON may have been deferred while
1593 * performing the scan, fire one off if needed
1594 */
1595 for_each_context(priv, ctx)
1596 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1597 iwlagn_commit_rxon(priv, ctx);
1434 1598
1435 queue_delayed_work(priv->workqueue, &priv->scan_check, 1599 if (priv->cfg->ops->hcmd->set_pan_params)
1436 IWL_SCAN_CHECK_WATCHDOG); 1600 priv->cfg->ops->hcmd->set_pan_params(priv);
1437
1438 return;
1439
1440 done:
1441 /* Cannot perform scan. Make sure we clear scanning
1442 * bits from status so next scan request can be performed.
1443 * If we don't clear scanning status bit here all next scan
1444 * will fail
1445 */
1446 clear_bit(STATUS_SCAN_HW, &priv->status);
1447 clear_bit(STATUS_SCANNING, &priv->status);
1448 /* inform mac80211 scan aborted */
1449 queue_work(priv->workqueue, &priv->scan_completed);
1450} 1601}
1451 1602
1452int iwlagn_manage_ibss_station(struct iwl_priv *priv, 1603int iwlagn_manage_ibss_station(struct iwl_priv *priv,
@@ -1455,9 +1606,9 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1455 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1606 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1456 1607
1457 if (add) 1608 if (add)
1458 return iwl_add_bssid_station(priv, vif_priv->ctx, 1609 return iwlagn_add_bssid_station(priv, vif_priv->ctx,
1459 vif->bss_conf.bssid, true, 1610 vif->bss_conf.bssid,
1460 &vif_priv->ibss_bssid_sta_id); 1611 &vif_priv->ibss_bssid_sta_id);
1461 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, 1612 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1462 vif->bss_conf.bssid); 1613 vif->bss_conf.bssid);
1463} 1614}
@@ -1669,10 +1820,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1669 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != 1820 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
1670 sizeof(bt_cmd.bt3_lookup_table)); 1821 sizeof(bt_cmd.bt3_lookup_table));
1671 1822
1672 bt_cmd.prio_boost = priv->cfg->bt_prio_boost; 1823 if (priv->cfg->bt_params)
1824 bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost;
1825 else
1826 bt_cmd.prio_boost = 0;
1673 bt_cmd.kill_ack_mask = priv->kill_ack_mask; 1827 bt_cmd.kill_ack_mask = priv->kill_ack_mask;
1674 bt_cmd.kill_cts_mask = priv->kill_cts_mask; 1828 bt_cmd.kill_cts_mask = priv->kill_cts_mask;
1675 bt_cmd.valid = priv->bt_valid; 1829 bt_cmd.valid = priv->bt_valid;
1830 bt_cmd.tx_prio_boost = 0;
1831 bt_cmd.rx_prio_boost = 0;
1676 1832
1677 /* 1833 /*
1678 * Configure BT coex mode to "no coexistence" when the 1834 * Configure BT coex mode to "no coexistence" when the
@@ -1928,3 +2084,290 @@ void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
1928{ 2084{
1929 cancel_work_sync(&priv->bt_traffic_change_work); 2085 cancel_work_sync(&priv->bt_traffic_change_work);
1930} 2086}
2087
2088static bool is_single_rx_stream(struct iwl_priv *priv)
2089{
2090 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
2091 priv->current_ht_config.single_chain_sufficient;
2092}
2093
2094#define IWL_NUM_RX_CHAINS_MULTIPLE 3
2095#define IWL_NUM_RX_CHAINS_SINGLE 2
2096#define IWL_NUM_IDLE_CHAINS_DUAL 2
2097#define IWL_NUM_IDLE_CHAINS_SINGLE 1
2098
2099/*
2100 * Determine how many receiver/antenna chains to use.
2101 *
2102 * More provides better reception via diversity. Fewer saves power
2103 * at the expense of throughput, but only when not in powersave to
2104 * start with.
2105 *
2106 * MIMO (dual stream) requires at least 2, but works better with 3.
2107 * This does not determine *which* chains to use, just how many.
2108 */
2109static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
2110{
2111 if (priv->cfg->bt_params &&
2112 priv->cfg->bt_params->advanced_bt_coexist &&
2113 (priv->bt_full_concurrent ||
2114 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
2115 /*
2116 * only use chain 'A' in bt high traffic load or
2117 * full concurrency mode
2118 */
2119 return IWL_NUM_RX_CHAINS_SINGLE;
2120 }
2121 /* # of Rx chains to use when expecting MIMO. */
2122 if (is_single_rx_stream(priv))
2123 return IWL_NUM_RX_CHAINS_SINGLE;
2124 else
2125 return IWL_NUM_RX_CHAINS_MULTIPLE;
2126}
2127
2128/*
2129 * When we are in power saving mode, unless device support spatial
2130 * multiplexing power save, use the active count for rx chain count.
2131 */
2132static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
2133{
2134 /* # Rx chains when idling, depending on SMPS mode */
2135 switch (priv->current_ht_config.smps) {
2136 case IEEE80211_SMPS_STATIC:
2137 case IEEE80211_SMPS_DYNAMIC:
2138 return IWL_NUM_IDLE_CHAINS_SINGLE;
2139 case IEEE80211_SMPS_OFF:
2140 return active_cnt;
2141 default:
2142 WARN(1, "invalid SMPS mode %d",
2143 priv->current_ht_config.smps);
2144 return active_cnt;
2145 }
2146}
2147
2148/* up to 4 chains */
2149static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
2150{
2151 u8 res;
2152 res = (chain_bitmap & BIT(0)) >> 0;
2153 res += (chain_bitmap & BIT(1)) >> 1;
2154 res += (chain_bitmap & BIT(2)) >> 2;
2155 res += (chain_bitmap & BIT(3)) >> 3;
2156 return res;
2157}
2158
2159/**
2160 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
2161 *
2162 * Selects how many and which Rx receivers/antennas/chains to use.
2163 * This should not be used for scan command ... it puts data in wrong place.
2164 */
2165void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2166{
2167 bool is_single = is_single_rx_stream(priv);
2168 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
2169 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
2170 u32 active_chains;
2171 u16 rx_chain;
2172
2173 /* Tell uCode which antennas are actually connected.
2174 * Before first association, we assume all antennas are connected.
2175 * Just after first association, iwl_chain_noise_calibration()
2176 * checks which antennas actually *are* connected. */
2177 if (priv->chain_noise_data.active_chains)
2178 active_chains = priv->chain_noise_data.active_chains;
2179 else
2180 active_chains = priv->hw_params.valid_rx_ant;
2181
2182 if (priv->cfg->bt_params &&
2183 priv->cfg->bt_params->advanced_bt_coexist &&
2184 (priv->bt_full_concurrent ||
2185 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
2186 /*
2187 * only use chain 'A' in bt high traffic load or
2188 * full concurrency mode
2189 */
2190 active_chains = first_antenna(active_chains);
2191 }
2192
2193 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
2194
2195 /* How many receivers should we use? */
2196 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
2197 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
2198
2199
2200 /* correct rx chain count according hw settings
2201 * and chain noise calibration
2202 */
2203 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
2204 if (valid_rx_cnt < active_rx_cnt)
2205 active_rx_cnt = valid_rx_cnt;
2206
2207 if (valid_rx_cnt < idle_rx_cnt)
2208 idle_rx_cnt = valid_rx_cnt;
2209
2210 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
2211 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
2212
2213 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
2214
2215 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
2216 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
2217 else
2218 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
2219
2220 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
2221 ctx->staging.rx_chain,
2222 active_rx_cnt, idle_rx_cnt);
2223
2224 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
2225 active_rx_cnt < idle_rx_cnt);
2226}
2227
2228u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
2229{
2230 int i;
2231 u8 ind = ant;
2232
2233 if (priv->band == IEEE80211_BAND_2GHZ &&
2234 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
2235 return 0;
2236
2237 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
2238 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
2239 if (valid & BIT(ind))
2240 return ind;
2241 }
2242 return ant;
2243}
2244
2245static const char *get_csr_string(int cmd)
2246{
2247 switch (cmd) {
2248 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2249 IWL_CMD(CSR_INT_COALESCING);
2250 IWL_CMD(CSR_INT);
2251 IWL_CMD(CSR_INT_MASK);
2252 IWL_CMD(CSR_FH_INT_STATUS);
2253 IWL_CMD(CSR_GPIO_IN);
2254 IWL_CMD(CSR_RESET);
2255 IWL_CMD(CSR_GP_CNTRL);
2256 IWL_CMD(CSR_HW_REV);
2257 IWL_CMD(CSR_EEPROM_REG);
2258 IWL_CMD(CSR_EEPROM_GP);
2259 IWL_CMD(CSR_OTP_GP_REG);
2260 IWL_CMD(CSR_GIO_REG);
2261 IWL_CMD(CSR_GP_UCODE_REG);
2262 IWL_CMD(CSR_GP_DRIVER_REG);
2263 IWL_CMD(CSR_UCODE_DRV_GP1);
2264 IWL_CMD(CSR_UCODE_DRV_GP2);
2265 IWL_CMD(CSR_LED_REG);
2266 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2267 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2268 IWL_CMD(CSR_ANA_PLL_CFG);
2269 IWL_CMD(CSR_HW_REV_WA_REG);
2270 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2271 default:
2272 return "UNKNOWN";
2273 }
2274}
2275
2276void iwl_dump_csr(struct iwl_priv *priv)
2277{
2278 int i;
2279 u32 csr_tbl[] = {
2280 CSR_HW_IF_CONFIG_REG,
2281 CSR_INT_COALESCING,
2282 CSR_INT,
2283 CSR_INT_MASK,
2284 CSR_FH_INT_STATUS,
2285 CSR_GPIO_IN,
2286 CSR_RESET,
2287 CSR_GP_CNTRL,
2288 CSR_HW_REV,
2289 CSR_EEPROM_REG,
2290 CSR_EEPROM_GP,
2291 CSR_OTP_GP_REG,
2292 CSR_GIO_REG,
2293 CSR_GP_UCODE_REG,
2294 CSR_GP_DRIVER_REG,
2295 CSR_UCODE_DRV_GP1,
2296 CSR_UCODE_DRV_GP2,
2297 CSR_LED_REG,
2298 CSR_DRAM_INT_TBL_REG,
2299 CSR_GIO_CHICKEN_BITS,
2300 CSR_ANA_PLL_CFG,
2301 CSR_HW_REV_WA_REG,
2302 CSR_DBG_HPET_MEM_REG
2303 };
2304 IWL_ERR(priv, "CSR values:\n");
2305 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2306 "CSR_INT_PERIODIC_REG)\n");
2307 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2308 IWL_ERR(priv, " %25s: 0X%08x\n",
2309 get_csr_string(csr_tbl[i]),
2310 iwl_read32(priv, csr_tbl[i]));
2311 }
2312}
2313
2314static const char *get_fh_string(int cmd)
2315{
2316 switch (cmd) {
2317 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2318 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2319 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2320 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2321 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2322 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2323 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2324 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2325 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2326 default:
2327 return "UNKNOWN";
2328 }
2329}
2330
2331int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2332{
2333 int i;
2334#ifdef CONFIG_IWLWIFI_DEBUG
2335 int pos = 0;
2336 size_t bufsz = 0;
2337#endif
2338 u32 fh_tbl[] = {
2339 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2340 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2341 FH_RSCSR_CHNL0_WPTR,
2342 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2343 FH_MEM_RSSR_SHARED_CTRL_REG,
2344 FH_MEM_RSSR_RX_STATUS_REG,
2345 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2346 FH_TSSR_TX_STATUS_REG,
2347 FH_TSSR_TX_ERROR_REG
2348 };
2349#ifdef CONFIG_IWLWIFI_DEBUG
2350 if (display) {
2351 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2352 *buf = kmalloc(bufsz, GFP_KERNEL);
2353 if (!*buf)
2354 return -ENOMEM;
2355 pos += scnprintf(*buf + pos, bufsz - pos,
2356 "FH register values:\n");
2357 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2358 pos += scnprintf(*buf + pos, bufsz - pos,
2359 " %34s: 0X%08x\n",
2360 get_fh_string(fh_tbl[i]),
2361 iwl_read_direct32(priv, fh_tbl[i]));
2362 }
2363 return pos;
2364 }
2365#endif
2366 IWL_ERR(priv, "FH register values:\n");
2367 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2368 IWL_ERR(priv, " %34s: 0X%08x\n",
2369 get_fh_string(fh_tbl[i]),
2370 iwl_read_direct32(priv, fh_tbl[i]));
2371 }
2372 return 0;
2373}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 57629fba3a7d..5abe2e9ff0d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -39,6 +39,7 @@
39#include "iwl-dev.h" 39#include "iwl-dev.h"
40#include "iwl-sta.h" 40#include "iwl-sta.h"
41#include "iwl-core.h" 41#include "iwl-core.h"
42#include "iwl-agn.h"
42 43
43#define RS_NAME "iwl-agn-rs" 44#define RS_NAME "iwl-agn-rs"
44 45
@@ -76,6 +77,74 @@ static const u8 ant_toggle_lookup[] = {
76 /*ANT_ABC -> */ ANT_ABC, 77 /*ANT_ABC -> */ ANT_ABC,
77}; 78};
78 79
80#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
81 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
82 IWL_RATE_SISO_##s##M_PLCP, \
83 IWL_RATE_MIMO2_##s##M_PLCP,\
84 IWL_RATE_MIMO3_##s##M_PLCP,\
85 IWL_RATE_##r##M_IEEE, \
86 IWL_RATE_##ip##M_INDEX, \
87 IWL_RATE_##in##M_INDEX, \
88 IWL_RATE_##rp##M_INDEX, \
89 IWL_RATE_##rn##M_INDEX, \
90 IWL_RATE_##pp##M_INDEX, \
91 IWL_RATE_##np##M_INDEX }
92
93/*
94 * Parameter order:
95 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
96 *
97 * If there isn't a valid next or previous rate then INV is used which
98 * maps to IWL_RATE_INVALID
99 *
100 */
101const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
102 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
103 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
104 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
105 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
106 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
107 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
108 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
109 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
110 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
111 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
112 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
113 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
114 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
115 /* FIXME:RS: ^^ should be INV (legacy) */
116};
117
118static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
119{
120 int idx = 0;
121
122 /* HT rate format */
123 if (rate_n_flags & RATE_MCS_HT_MSK) {
124 idx = (rate_n_flags & 0xff);
125
126 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
127 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
128 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
129 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
130
131 idx += IWL_FIRST_OFDM_RATE;
132 /* skip 9M not supported in ht*/
133 if (idx >= IWL_RATE_9M_INDEX)
134 idx += 1;
135 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
136 return idx;
137
138 /* legacy rate format, search for match in table */
139 } else {
140 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
141 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
142 return idx;
143 }
144
145 return -1;
146}
147
79static void rs_rate_scale_perform(struct iwl_priv *priv, 148static void rs_rate_scale_perform(struct iwl_priv *priv,
80 struct sk_buff *skb, 149 struct sk_buff *skb,
81 struct ieee80211_sta *sta, 150 struct ieee80211_sta *sta,
@@ -2939,11 +3008,14 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2939 * overwrite if needed, pass aggregation time limit 3008 * overwrite if needed, pass aggregation time limit
2940 * to uCode in uSec 3009 * to uCode in uSec
2941 */ 3010 */
2942 if (priv && priv->cfg->agg_time_limit && 3011 if (priv && priv->cfg->bt_params &&
2943 priv->cfg->agg_time_limit >= LINK_QUAL_AGG_TIME_LIMIT_MIN && 3012 priv->cfg->bt_params->agg_time_limit &&
2944 priv->cfg->agg_time_limit <= LINK_QUAL_AGG_TIME_LIMIT_MAX) 3013 priv->cfg->bt_params->agg_time_limit >=
3014 LINK_QUAL_AGG_TIME_LIMIT_MIN &&
3015 priv->cfg->bt_params->agg_time_limit <=
3016 LINK_QUAL_AGG_TIME_LIMIT_MAX)
2945 lq_cmd->agg_params.agg_time_limit = 3017 lq_cmd->agg_params.agg_time_limit =
2946 cpu_to_le16(priv->cfg->agg_time_limit); 3018 cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
2947} 3019}
2948 3020
2949static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 3021static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 3970ab1deaf9..75e50d33ecb3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -299,7 +299,6 @@ enum {
299#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) 299#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
300 300
301extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 301extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
302extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
303 302
304enum iwl_table_type { 303enum iwl_table_type {
305 LQ_NONE, 304 LQ_NONE,
@@ -453,24 +452,6 @@ static inline u8 first_antenna(u8 mask)
453} 452}
454 453
455 454
456static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
457{
458 u8 rate = iwl_rates[rate_index].prev_ieee;
459
460 if (rate == IWL_RATE_INVALID)
461 rate = rate_index;
462 return rate;
463}
464
465static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
466{
467 u8 rate = iwl3945_rates[rate_index].prev_ieee;
468
469 if (rate == IWL_RATE_INVALID)
470 rate = rate_index;
471 return rate;
472}
473
474/** 455/**
475 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info 456 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
476 * 457 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 9490eced1198..bbd40b7dd597 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -34,7 +34,7 @@
34 34
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-calib.h" 37#include "iwl-agn-calib.h"
38#include "iwl-sta.h" 38#include "iwl-sta.h"
39#include "iwl-io.h" 39#include "iwl-io.h"
40#include "iwl-helpers.h" 40#include "iwl-helpers.h"
@@ -73,7 +73,8 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c; 73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise; 74 int last_rx_noise;
75 75
76 if (priv->cfg->bt_statistics) 76 if (priv->cfg->bt_params &&
77 priv->cfg->bt_params->bt_statistics)
77 rx_info = &(priv->_agn.statistics_bt.rx.general.common); 78 rx_info = &(priv->_agn.statistics_bt.rx.general.common);
78 else 79 else
79 rx_info = &(priv->_agn.statistics.rx.general); 80 rx_info = &(priv->_agn.statistics.rx.general);
@@ -124,7 +125,8 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
124 struct statistics_general_common *general, *accum_general; 125 struct statistics_general_common *general, *accum_general;
125 struct statistics_tx *tx, *accum_tx; 126 struct statistics_tx *tx, *accum_tx;
126 127
127 if (priv->cfg->bt_statistics) { 128 if (priv->cfg->bt_params &&
129 priv->cfg->bt_params->bt_statistics) {
128 prev_stats = (__le32 *)&priv->_agn.statistics_bt; 130 prev_stats = (__le32 *)&priv->_agn.statistics_bt;
129 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; 131 accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
130 size = sizeof(struct iwl_bt_notif_statistics); 132 size = sizeof(struct iwl_bt_notif_statistics);
@@ -183,7 +185,7 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
183 unsigned int plcp_msec; 185 unsigned int plcp_msec;
184 unsigned long plcp_received_jiffies; 186 unsigned long plcp_received_jiffies;
185 187
186 if (priv->cfg->plcp_delta_threshold == 188 if (priv->cfg->base_params->plcp_delta_threshold ==
187 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { 189 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
188 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); 190 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
189 return rc; 191 return rc;
@@ -205,7 +207,8 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
205 struct statistics_rx_phy *ofdm; 207 struct statistics_rx_phy *ofdm;
206 struct statistics_rx_ht_phy *ofdm_ht; 208 struct statistics_rx_ht_phy *ofdm_ht;
207 209
208 if (priv->cfg->bt_statistics) { 210 if (priv->cfg->bt_params &&
211 priv->cfg->bt_params->bt_statistics) {
209 ofdm = &pkt->u.stats_bt.rx.ofdm; 212 ofdm = &pkt->u.stats_bt.rx.ofdm;
210 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; 213 ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
211 combined_plcp_delta = 214 combined_plcp_delta =
@@ -229,7 +232,7 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
229 232
230 if ((combined_plcp_delta > 0) && 233 if ((combined_plcp_delta > 0) &&
231 ((combined_plcp_delta * 100) / plcp_msec) > 234 ((combined_plcp_delta * 100) / plcp_msec) >
232 priv->cfg->plcp_delta_threshold) { 235 priv->cfg->base_params->plcp_delta_threshold) {
233 /* 236 /*
234 * if plcp_err exceed the threshold, 237 * if plcp_err exceed the threshold,
235 * the following data is printed in csv format: 238 * the following data is printed in csv format:
@@ -242,13 +245,13 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
242 * plcp_msec 245 * plcp_msec
243 */ 246 */
244 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " 247 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
245 "%u, %u, %u, %u, %d, %u mSecs\n", 248 "%u, %u, %u, %u, %d, %u mSecs\n",
246 priv->cfg->plcp_delta_threshold, 249 priv->cfg->base_params->plcp_delta_threshold,
247 le32_to_cpu(ofdm->plcp_err), 250 le32_to_cpu(ofdm->plcp_err),
248 le32_to_cpu(ofdm->plcp_err), 251 le32_to_cpu(ofdm->plcp_err),
249 le32_to_cpu(ofdm_ht->plcp_err), 252 le32_to_cpu(ofdm_ht->plcp_err),
250 le32_to_cpu(ofdm_ht->plcp_err), 253 le32_to_cpu(ofdm_ht->plcp_err),
251 combined_plcp_delta, plcp_msec); 254 combined_plcp_delta, plcp_msec);
252 255
253 rc = false; 256 rc = false;
254 } 257 }
@@ -262,7 +265,8 @@ void iwl_rx_statistics(struct iwl_priv *priv,
262 int change; 265 int change;
263 struct iwl_rx_packet *pkt = rxb_addr(rxb); 266 struct iwl_rx_packet *pkt = rxb_addr(rxb);
264 267
265 if (priv->cfg->bt_statistics) { 268 if (priv->cfg->bt_params &&
269 priv->cfg->bt_params->bt_statistics) {
266 IWL_DEBUG_RX(priv, 270 IWL_DEBUG_RX(priv,
267 "Statistics notification received (%d vs %d).\n", 271 "Statistics notification received (%d vs %d).\n",
268 (int)sizeof(struct iwl_bt_notif_statistics), 272 (int)sizeof(struct iwl_bt_notif_statistics),
@@ -300,7 +304,8 @@ void iwl_rx_statistics(struct iwl_priv *priv,
300 304
301 iwl_recover_from_statistics(priv, pkt); 305 iwl_recover_from_statistics(priv, pkt);
302 306
303 if (priv->cfg->bt_statistics) 307 if (priv->cfg->bt_params &&
308 priv->cfg->bt_params->bt_statistics)
304 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, 309 memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
305 sizeof(priv->_agn.statistics_bt)); 310 sizeof(priv->_agn.statistics_bt));
306 else 311 else
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
new file mode 100644
index 000000000000..35a30d2e0734
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -0,0 +1,716 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-agn.h"
36
37static struct iwl_link_quality_cmd *
38iwl_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
39{
40 int i, r;
41 struct iwl_link_quality_cmd *link_cmd;
42 u32 rate_flags = 0;
43 __le32 rate_n_flags;
44
45 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
46 if (!link_cmd) {
47 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
48 return NULL;
49 }
50 /* Set up the rate scaling to start at selected rate, fall back
51 * all the way down to 1M in IEEE order, and then spin on 1M */
52 if (priv->band == IEEE80211_BAND_5GHZ)
53 r = IWL_RATE_6M_INDEX;
54 else
55 r = IWL_RATE_1M_INDEX;
56
57 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
58 rate_flags |= RATE_MCS_CCK_MSK;
59
60 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
61 RATE_MCS_ANT_POS;
62 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
63 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
64 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
65
66 link_cmd->general_params.single_stream_ant_msk =
67 first_antenna(priv->hw_params.valid_tx_ant);
68
69 link_cmd->general_params.dual_stream_ant_msk =
70 priv->hw_params.valid_tx_ant &
71 ~first_antenna(priv->hw_params.valid_tx_ant);
72 if (!link_cmd->general_params.dual_stream_ant_msk) {
73 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
74 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
75 link_cmd->general_params.dual_stream_ant_msk =
76 priv->hw_params.valid_tx_ant;
77 }
78
79 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
80 link_cmd->agg_params.agg_time_limit =
81 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
82
83 link_cmd->sta_id = sta_id;
84
85 return link_cmd;
86}
87
88/*
89 * iwlagn_add_bssid_station - Add the special IBSS BSSID station
90 *
91 * Function sleeps.
92 */
93int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
94 const u8 *addr, u8 *sta_id_r)
95{
96 int ret;
97 u8 sta_id;
98 struct iwl_link_quality_cmd *link_cmd;
99 unsigned long flags;
100
101 if (sta_id_r)
102 *sta_id_r = IWL_INVALID_STATION;
103
104 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
105 if (ret) {
106 IWL_ERR(priv, "Unable to add station %pM\n", addr);
107 return ret;
108 }
109
110 if (sta_id_r)
111 *sta_id_r = sta_id;
112
113 spin_lock_irqsave(&priv->sta_lock, flags);
114 priv->stations[sta_id].used |= IWL_STA_LOCAL;
115 spin_unlock_irqrestore(&priv->sta_lock, flags);
116
117 /* Set up default rate scaling table in device's station table */
118 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
119 if (!link_cmd) {
120 IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
121 addr);
122 return -ENOMEM;
123 }
124
125 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
126 if (ret)
127 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
128
129 spin_lock_irqsave(&priv->sta_lock, flags);
130 priv->stations[sta_id].lq = link_cmd;
131 spin_unlock_irqrestore(&priv->sta_lock, flags);
132
133 return 0;
134}
135
136static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
137 struct iwl_rxon_context *ctx,
138 bool send_if_empty)
139{
140 int i, not_empty = 0;
141 u8 buff[sizeof(struct iwl_wep_cmd) +
142 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
143 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
144 size_t cmd_size = sizeof(struct iwl_wep_cmd);
145 struct iwl_host_cmd cmd = {
146 .id = ctx->wep_key_cmd,
147 .data = wep_cmd,
148 .flags = CMD_SYNC,
149 };
150
151 might_sleep();
152
153 memset(wep_cmd, 0, cmd_size +
154 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
155
156 for (i = 0; i < WEP_KEYS_MAX ; i++) {
157 wep_cmd->key[i].key_index = i;
158 if (ctx->wep_keys[i].key_size) {
159 wep_cmd->key[i].key_offset = i;
160 not_empty = 1;
161 } else {
162 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
163 }
164
165 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
166 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
167 ctx->wep_keys[i].key_size);
168 }
169
170 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
171 wep_cmd->num_keys = WEP_KEYS_MAX;
172
173 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
174
175 cmd.len = cmd_size;
176
177 if (not_empty || send_if_empty)
178 return iwl_send_cmd(priv, &cmd);
179 else
180 return 0;
181}
182
183int iwl_restore_default_wep_keys(struct iwl_priv *priv,
184 struct iwl_rxon_context *ctx)
185{
186 lockdep_assert_held(&priv->mutex);
187
188 return iwl_send_static_wepkey_cmd(priv, ctx, false);
189}
190
191int iwl_remove_default_wep_key(struct iwl_priv *priv,
192 struct iwl_rxon_context *ctx,
193 struct ieee80211_key_conf *keyconf)
194{
195 int ret;
196
197 lockdep_assert_held(&priv->mutex);
198
199 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
200 keyconf->keyidx);
201
202 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
203 if (iwl_is_rfkill(priv)) {
204 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
205 /* but keys in device are clear anyway so return success */
206 return 0;
207 }
208 ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
209 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
210 keyconf->keyidx, ret);
211
212 return ret;
213}
214
215int iwl_set_default_wep_key(struct iwl_priv *priv,
216 struct iwl_rxon_context *ctx,
217 struct ieee80211_key_conf *keyconf)
218{
219 int ret;
220
221 lockdep_assert_held(&priv->mutex);
222
223 if (keyconf->keylen != WEP_KEY_LEN_128 &&
224 keyconf->keylen != WEP_KEY_LEN_64) {
225 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
226 return -EINVAL;
227 }
228
229 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
230 keyconf->hw_key_idx = HW_KEY_DEFAULT;
231 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
232
233 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
234 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
235 keyconf->keylen);
236
237 ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
238 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
239 keyconf->keylen, keyconf->keyidx, ret);
240
241 return ret;
242}
243
244static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
245 struct iwl_rxon_context *ctx,
246 struct ieee80211_key_conf *keyconf,
247 u8 sta_id)
248{
249 unsigned long flags;
250 __le16 key_flags = 0;
251 struct iwl_addsta_cmd sta_cmd;
252
253 lockdep_assert_held(&priv->mutex);
254
255 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
256
257 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
258 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
259 key_flags &= ~STA_KEY_FLG_INVALID;
260
261 if (keyconf->keylen == WEP_KEY_LEN_128)
262 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
263
264 if (sta_id == ctx->bcast_sta_id)
265 key_flags |= STA_KEY_MULTICAST_MSK;
266
267 spin_lock_irqsave(&priv->sta_lock, flags);
268
269 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
270 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
271 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
272
273 memcpy(priv->stations[sta_id].keyinfo.key,
274 keyconf->key, keyconf->keylen);
275
276 memcpy(&priv->stations[sta_id].sta.key.key[3],
277 keyconf->key, keyconf->keylen);
278
279 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
280 == STA_KEY_FLG_NO_ENC)
281 priv->stations[sta_id].sta.key.key_offset =
282 iwl_get_free_ucode_key_index(priv);
283 /* else, we are overriding an existing key => no need to allocated room
284 * in uCode. */
285
286 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
287 "no space for a new key");
288
289 priv->stations[sta_id].sta.key.key_flags = key_flags;
290 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
291 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
292
293 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
294 spin_unlock_irqrestore(&priv->sta_lock, flags);
295
296 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
297}
298
299static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
300 struct iwl_rxon_context *ctx,
301 struct ieee80211_key_conf *keyconf,
302 u8 sta_id)
303{
304 unsigned long flags;
305 __le16 key_flags = 0;
306 struct iwl_addsta_cmd sta_cmd;
307
308 lockdep_assert_held(&priv->mutex);
309
310 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
311 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
312 key_flags &= ~STA_KEY_FLG_INVALID;
313
314 if (sta_id == ctx->bcast_sta_id)
315 key_flags |= STA_KEY_MULTICAST_MSK;
316
317 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
318
319 spin_lock_irqsave(&priv->sta_lock, flags);
320 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
321 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
322
323 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
324 keyconf->keylen);
325
326 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
327 keyconf->keylen);
328
329 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
330 == STA_KEY_FLG_NO_ENC)
331 priv->stations[sta_id].sta.key.key_offset =
332 iwl_get_free_ucode_key_index(priv);
333 /* else, we are overriding an existing key => no need to allocated room
334 * in uCode. */
335
336 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
337 "no space for a new key");
338
339 priv->stations[sta_id].sta.key.key_flags = key_flags;
340 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
341 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
342
343 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
344 spin_unlock_irqrestore(&priv->sta_lock, flags);
345
346 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
347}
348
349static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
350 struct iwl_rxon_context *ctx,
351 struct ieee80211_key_conf *keyconf,
352 u8 sta_id)
353{
354 unsigned long flags;
355 int ret = 0;
356 __le16 key_flags = 0;
357
358 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
359 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
360 key_flags &= ~STA_KEY_FLG_INVALID;
361
362 if (sta_id == ctx->bcast_sta_id)
363 key_flags |= STA_KEY_MULTICAST_MSK;
364
365 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
366 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
367
368 spin_lock_irqsave(&priv->sta_lock, flags);
369
370 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
371 priv->stations[sta_id].keyinfo.keylen = 16;
372
373 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
374 == STA_KEY_FLG_NO_ENC)
375 priv->stations[sta_id].sta.key.key_offset =
376 iwl_get_free_ucode_key_index(priv);
377 /* else, we are overriding an existing key => no need to allocated room
378 * in uCode. */
379
380 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
381 "no space for a new key");
382
383 priv->stations[sta_id].sta.key.key_flags = key_flags;
384
385
386 /* This copy is acutally not needed: we get the key with each TX */
387 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
388
389 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
390
391 spin_unlock_irqrestore(&priv->sta_lock, flags);
392
393 return ret;
394}
395
396void iwl_update_tkip_key(struct iwl_priv *priv,
397 struct iwl_rxon_context *ctx,
398 struct ieee80211_key_conf *keyconf,
399 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
400{
401 u8 sta_id;
402 unsigned long flags;
403 int i;
404
405 if (iwl_scan_cancel(priv)) {
406 /* cancel scan failed, just live w/ bad key and rely
407 briefly on SW decryption */
408 return;
409 }
410
411 sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
412 if (sta_id == IWL_INVALID_STATION)
413 return;
414
415 spin_lock_irqsave(&priv->sta_lock, flags);
416
417 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
418
419 for (i = 0; i < 5; i++)
420 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
421 cpu_to_le16(phase1key[i]);
422
423 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
424 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
425
426 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
427
428 spin_unlock_irqrestore(&priv->sta_lock, flags);
429
430}
431
432int iwl_remove_dynamic_key(struct iwl_priv *priv,
433 struct iwl_rxon_context *ctx,
434 struct ieee80211_key_conf *keyconf,
435 u8 sta_id)
436{
437 unsigned long flags;
438 u16 key_flags;
439 u8 keyidx;
440 struct iwl_addsta_cmd sta_cmd;
441
442 lockdep_assert_held(&priv->mutex);
443
444 ctx->key_mapping_keys--;
445
446 spin_lock_irqsave(&priv->sta_lock, flags);
447 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
448 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
449
450 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
451 keyconf->keyidx, sta_id);
452
453 if (keyconf->keyidx != keyidx) {
454 /* We need to remove a key with index different that the one
455 * in the uCode. This means that the key we need to remove has
456 * been replaced by another one with different index.
457 * Don't do anything and return ok
458 */
459 spin_unlock_irqrestore(&priv->sta_lock, flags);
460 return 0;
461 }
462
463 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
464 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
465 keyconf->keyidx, key_flags);
466 spin_unlock_irqrestore(&priv->sta_lock, flags);
467 return 0;
468 }
469
470 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
471 &priv->ucode_key_table))
472 IWL_ERR(priv, "index %d not used in uCode key table.\n",
473 priv->stations[sta_id].sta.key.key_offset);
474 memset(&priv->stations[sta_id].keyinfo, 0,
475 sizeof(struct iwl_hw_key));
476 memset(&priv->stations[sta_id].sta.key, 0,
477 sizeof(struct iwl4965_keyinfo));
478 priv->stations[sta_id].sta.key.key_flags =
479 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
480 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
481 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
482 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
483
484 if (iwl_is_rfkill(priv)) {
485 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
486 spin_unlock_irqrestore(&priv->sta_lock, flags);
487 return 0;
488 }
489 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
490 spin_unlock_irqrestore(&priv->sta_lock, flags);
491
492 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
493}
494
495int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
496 struct ieee80211_key_conf *keyconf, u8 sta_id)
497{
498 int ret;
499
500 lockdep_assert_held(&priv->mutex);
501
502 ctx->key_mapping_keys++;
503 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
504
505 switch (keyconf->cipher) {
506 case WLAN_CIPHER_SUITE_CCMP:
507 ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
508 break;
509 case WLAN_CIPHER_SUITE_TKIP:
510 ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
511 break;
512 case WLAN_CIPHER_SUITE_WEP40:
513 case WLAN_CIPHER_SUITE_WEP104:
514 ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
515 break;
516 default:
517 IWL_ERR(priv,
518 "Unknown alg: %s cipher = %x\n", __func__,
519 keyconf->cipher);
520 ret = -EINVAL;
521 }
522
523 IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
524 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
525 sta_id, ret);
526
527 return ret;
528}
529
530/**
531 * iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
532 *
533 * This adds the broadcast station into the driver's station table
534 * and marks it driver active, so that it will be restored to the
535 * device at the next best time.
536 */
537int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
538 struct iwl_rxon_context *ctx)
539{
540 struct iwl_link_quality_cmd *link_cmd;
541 unsigned long flags;
542 u8 sta_id;
543
544 spin_lock_irqsave(&priv->sta_lock, flags);
545 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
546 if (sta_id == IWL_INVALID_STATION) {
547 IWL_ERR(priv, "Unable to prepare broadcast station\n");
548 spin_unlock_irqrestore(&priv->sta_lock, flags);
549
550 return -EINVAL;
551 }
552
553 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
554 priv->stations[sta_id].used |= IWL_STA_BCAST;
555 spin_unlock_irqrestore(&priv->sta_lock, flags);
556
557 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
558 if (!link_cmd) {
559 IWL_ERR(priv,
560 "Unable to initialize rate scaling for bcast station.\n");
561 return -ENOMEM;
562 }
563
564 spin_lock_irqsave(&priv->sta_lock, flags);
565 priv->stations[sta_id].lq = link_cmd;
566 spin_unlock_irqrestore(&priv->sta_lock, flags);
567
568 return 0;
569}
570
571/**
572 * iwl_update_bcast_station - update broadcast station's LQ command
573 *
574 * Only used by iwlagn. Placed here to have all bcast station management
575 * code together.
576 */
577static int iwl_update_bcast_station(struct iwl_priv *priv,
578 struct iwl_rxon_context *ctx)
579{
580 unsigned long flags;
581 struct iwl_link_quality_cmd *link_cmd;
582 u8 sta_id = ctx->bcast_sta_id;
583
584 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
585 if (!link_cmd) {
586 IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
587 return -ENOMEM;
588 }
589
590 spin_lock_irqsave(&priv->sta_lock, flags);
591 if (priv->stations[sta_id].lq)
592 kfree(priv->stations[sta_id].lq);
593 else
594 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
595 priv->stations[sta_id].lq = link_cmd;
596 spin_unlock_irqrestore(&priv->sta_lock, flags);
597
598 return 0;
599}
600
601int iwl_update_bcast_stations(struct iwl_priv *priv)
602{
603 struct iwl_rxon_context *ctx;
604 int ret = 0;
605
606 for_each_context(priv, ctx) {
607 ret = iwl_update_bcast_station(priv, ctx);
608 if (ret)
609 break;
610 }
611
612 return ret;
613}
614
615/**
616 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
617 */
618int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
619{
620 unsigned long flags;
621 struct iwl_addsta_cmd sta_cmd;
622
623 lockdep_assert_held(&priv->mutex);
624
625 /* Remove "disable" flag, to enable Tx for this TID */
626 spin_lock_irqsave(&priv->sta_lock, flags);
627 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
628 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
629 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
630 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
631 spin_unlock_irqrestore(&priv->sta_lock, flags);
632
633 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
634}
635
636int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
637 int tid, u16 ssn)
638{
639 unsigned long flags;
640 int sta_id;
641 struct iwl_addsta_cmd sta_cmd;
642
643 lockdep_assert_held(&priv->mutex);
644
645 sta_id = iwl_sta_id(sta);
646 if (sta_id == IWL_INVALID_STATION)
647 return -ENXIO;
648
649 spin_lock_irqsave(&priv->sta_lock, flags);
650 priv->stations[sta_id].sta.station_flags_msk = 0;
651 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
652 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
653 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
654 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
655 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
656 spin_unlock_irqrestore(&priv->sta_lock, flags);
657
658 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
659}
660
661int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
662 int tid)
663{
664 unsigned long flags;
665 int sta_id;
666 struct iwl_addsta_cmd sta_cmd;
667
668 lockdep_assert_held(&priv->mutex);
669
670 sta_id = iwl_sta_id(sta);
671 if (sta_id == IWL_INVALID_STATION) {
672 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
673 return -ENXIO;
674 }
675
676 spin_lock_irqsave(&priv->sta_lock, flags);
677 priv->stations[sta_id].sta.station_flags_msk = 0;
678 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
679 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
680 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
681 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
682 spin_unlock_irqrestore(&priv->sta_lock, flags);
683
684 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
685}
686
687void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
688{
689 unsigned long flags;
690
691 spin_lock_irqsave(&priv->sta_lock, flags);
692 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
693 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
694 priv->stations[sta_id].sta.sta.modify_mask = 0;
695 priv->stations[sta_id].sta.sleep_tx_count = 0;
696 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
697 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
698 spin_unlock_irqrestore(&priv->sta_lock, flags);
699
700}
701
702void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
703{
704 unsigned long flags;
705
706 spin_lock_irqsave(&priv->sta_lock, flags);
707 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
708 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
709 priv->stations[sta_id].sta.sta.modify_mask =
710 STA_MODIFY_SLEEP_TX_COUNT_MSK;
711 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
712 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
713 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
714 spin_unlock_irqrestore(&priv->sta_lock, flags);
715
716}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index 07b2c6cadf51..e3a8216a033c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -114,7 +114,7 @@ static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
114 s32 temp = priv->temperature; /* degrees CELSIUS except specified */ 114 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
115 bool within_margin = false; 115 bool within_margin = false;
116 116
117 if (priv->cfg->temperature_kelvin) 117 if (priv->cfg->base_params->temperature_kelvin)
118 temp = KELVIN_TO_CELSIUS(priv->temperature); 118 temp = KELVIN_TO_CELSIUS(priv->temperature);
119 119
120 if (!priv->thermal_throttle.advanced_tt) 120 if (!priv->thermal_throttle.advanced_tt)
@@ -571,7 +571,6 @@ void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
571 IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n"); 571 IWL_DEBUG_POWER(priv, "Queueing critical temperature enter.\n");
572 queue_work(priv->workqueue, &priv->ct_enter); 572 queue_work(priv->workqueue, &priv->ct_enter);
573} 573}
574EXPORT_SYMBOL(iwl_tt_enter_ct_kill);
575 574
576void iwl_tt_exit_ct_kill(struct iwl_priv *priv) 575void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
577{ 576{
@@ -581,7 +580,6 @@ void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
581 IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n"); 580 IWL_DEBUG_POWER(priv, "Queueing critical temperature exit.\n");
582 queue_work(priv->workqueue, &priv->ct_exit); 581 queue_work(priv->workqueue, &priv->ct_exit);
583} 582}
584EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
585 583
586static void iwl_bg_tt_work(struct work_struct *work) 584static void iwl_bg_tt_work(struct work_struct *work)
587{ 585{
@@ -591,7 +589,7 @@ static void iwl_bg_tt_work(struct work_struct *work)
591 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 589 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
592 return; 590 return;
593 591
594 if (priv->cfg->temperature_kelvin) 592 if (priv->cfg->base_params->temperature_kelvin)
595 temp = KELVIN_TO_CELSIUS(priv->temperature); 593 temp = KELVIN_TO_CELSIUS(priv->temperature);
596 594
597 if (!priv->thermal_throttle.advanced_tt) 595 if (!priv->thermal_throttle.advanced_tt)
@@ -608,7 +606,6 @@ void iwl_tt_handler(struct iwl_priv *priv)
608 IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n"); 606 IWL_DEBUG_POWER(priv, "Queueing thermal throttling work.\n");
609 queue_work(priv->workqueue, &priv->tt_work); 607 queue_work(priv->workqueue, &priv->tt_work);
610} 608}
611EXPORT_SYMBOL(iwl_tt_handler);
612 609
613/* Thermal throttling initialization 610/* Thermal throttling initialization
614 * For advance thermal throttling: 611 * For advance thermal throttling:
@@ -640,7 +637,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
640 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 637 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
641 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 638 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
642 639
643 if (priv->cfg->adv_thermal_throttle) { 640 if (priv->cfg->base_params->adv_thermal_throttle) {
644 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n"); 641 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
645 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) * 642 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
646 IWL_TI_STATE_MAX, GFP_KERNEL); 643 IWL_TI_STATE_MAX, GFP_KERNEL);
@@ -678,7 +675,6 @@ void iwl_tt_initialize(struct iwl_priv *priv)
678 priv->thermal_throttle.advanced_tt = false; 675 priv->thermal_throttle.advanced_tt = false;
679 } 676 }
680} 677}
681EXPORT_SYMBOL(iwl_tt_initialize);
682 678
683/* cleanup thermal throttling management related memory and timer */ 679/* cleanup thermal throttling management related memory and timer */
684void iwl_tt_exit(struct iwl_priv *priv) 680void iwl_tt_exit(struct iwl_priv *priv)
@@ -701,4 +697,3 @@ void iwl_tt_exit(struct iwl_priv *priv)
701 tt->transaction = NULL; 697 tt->transaction = NULL;
702 } 698 }
703} 699}
704EXPORT_SYMBOL(iwl_tt_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 5950184d9860..db57aea629d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -224,13 +224,13 @@ int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
224 int ret; 224 int ret;
225 225
226 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 226 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
227 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 227 (IWLAGN_FIRST_AMPDU_QUEUE +
228 <= txq_id)) { 228 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
229 IWL_WARN(priv, 229 IWL_WARN(priv,
230 "queue number out of range: %d, must be %d to %d\n", 230 "queue number out of range: %d, must be %d to %d\n",
231 txq_id, IWLAGN_FIRST_AMPDU_QUEUE, 231 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
232 IWLAGN_FIRST_AMPDU_QUEUE + 232 IWLAGN_FIRST_AMPDU_QUEUE +
233 priv->cfg->num_of_ampdu_queues - 1); 233 priv->cfg->base_params->num_of_ampdu_queues - 1);
234 return -EINVAL; 234 return -EINVAL;
235 } 235 }
236 236
@@ -286,13 +286,13 @@ int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
286 u16 ssn_idx, u8 tx_fifo) 286 u16 ssn_idx, u8 tx_fifo)
287{ 287{
288 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 288 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
289 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 289 (IWLAGN_FIRST_AMPDU_QUEUE +
290 <= txq_id)) { 290 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
291 IWL_ERR(priv, 291 IWL_ERR(priv,
292 "queue number out of range: %d, must be %d to %d\n", 292 "queue number out of range: %d, must be %d to %d\n",
293 txq_id, IWLAGN_FIRST_AMPDU_QUEUE, 293 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
294 IWLAGN_FIRST_AMPDU_QUEUE + 294 IWLAGN_FIRST_AMPDU_QUEUE +
295 priv->cfg->num_of_ampdu_queues - 1); 295 priv->cfg->base_params->num_of_ampdu_queues - 1);
296 return -EINVAL; 296 return -EINVAL;
297 } 297 }
298 298
@@ -350,7 +350,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
350 if (ieee80211_is_back_req(fc)) 350 if (ieee80211_is_back_req(fc))
351 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; 351 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
352 else if (info->band == IEEE80211_BAND_2GHZ && 352 else if (info->band == IEEE80211_BAND_2GHZ &&
353 priv->cfg->advanced_bt_coexist && 353 priv->cfg->bt_params &&
354 priv->cfg->bt_params->advanced_bt_coexist &&
354 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || 355 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
355 ieee80211_is_reassoc_req(fc) || 356 ieee80211_is_reassoc_req(fc) ||
356 skb->protocol == cpu_to_be16(ETH_P_PAE))) 357 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -444,7 +445,9 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
444 rate_flags |= RATE_MCS_CCK_MSK; 445 rate_flags |= RATE_MCS_CCK_MSK;
445 446
446 /* Set up antennas */ 447 /* Set up antennas */
447 if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) { 448 if (priv->cfg->bt_params &&
449 priv->cfg->bt_params->advanced_bt_coexist &&
450 priv->bt_full_concurrent) {
448 /* operated as 1x1 in full concurrency mode */ 451 /* operated as 1x1 in full concurrency mode */
449 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 452 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
450 first_antenna(priv->hw_params.valid_tx_ant)); 453 first_antenna(priv->hw_params.valid_tx_ant));
@@ -1388,3 +1391,43 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1388 1391
1389 spin_unlock_irqrestore(&priv->sta_lock, flags); 1392 spin_unlock_irqrestore(&priv->sta_lock, flags);
1390} 1393}
1394
1395#ifdef CONFIG_IWLWIFI_DEBUG
1396const char *iwl_get_tx_fail_reason(u32 status)
1397{
1398#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1399#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1400
1401 switch (status & TX_STATUS_MSK) {
1402 case TX_STATUS_SUCCESS:
1403 return "SUCCESS";
1404 TX_STATUS_POSTPONE(DELAY);
1405 TX_STATUS_POSTPONE(FEW_BYTES);
1406 TX_STATUS_POSTPONE(BT_PRIO);
1407 TX_STATUS_POSTPONE(QUIET_PERIOD);
1408 TX_STATUS_POSTPONE(CALC_TTAK);
1409 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1410 TX_STATUS_FAIL(SHORT_LIMIT);
1411 TX_STATUS_FAIL(LONG_LIMIT);
1412 TX_STATUS_FAIL(FIFO_UNDERRUN);
1413 TX_STATUS_FAIL(DRAIN_FLOW);
1414 TX_STATUS_FAIL(RFKILL_FLUSH);
1415 TX_STATUS_FAIL(LIFE_EXPIRE);
1416 TX_STATUS_FAIL(DEST_PS);
1417 TX_STATUS_FAIL(HOST_ABORTED);
1418 TX_STATUS_FAIL(BT_RETRY);
1419 TX_STATUS_FAIL(STA_INVALID);
1420 TX_STATUS_FAIL(FRAG_DROPPED);
1421 TX_STATUS_FAIL(TID_DISABLE);
1422 TX_STATUS_FAIL(FIFO_FLUSHED);
1423 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
1424 TX_STATUS_FAIL(PASSIVE_NO_RX);
1425 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
1426 }
1427
1428 return "UNKNOWN";
1429
1430#undef TX_STATUS_FAIL
1431#undef TX_STATUS_POSTPONE
1432}
1433#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a7961bf395fc..703621107dac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -38,6 +38,7 @@
38#include "iwl-helpers.h" 38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h" 39#include "iwl-agn-hw.h"
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-agn-calib.h"
41 42
42static const s8 iwlagn_default_queue_to_tx_fifo[] = { 43static const s8 iwlagn_default_queue_to_tx_fifo[] = {
43 IWL_TX_FIFO_VO, 44 IWL_TX_FIFO_VO,
@@ -214,6 +215,25 @@ static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
214 (u8 *)&cmd, sizeof(cmd)); 215 (u8 *)&cmd, sizeof(cmd));
215} 216}
216 217
218static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
219{
220 struct iwl_calib_temperature_offset_cmd cmd;
221 __le16 *offset_calib =
222 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
223 cmd.hdr.op_code = IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD;
224 cmd.hdr.first_group = 0;
225 cmd.hdr.groups_num = 1;
226 cmd.hdr.data_valid = 1;
227 cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
228 if (!(cmd.radio_sensor_offset))
229 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
230 cmd.reserved = 0;
231 IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
232 cmd.radio_sensor_offset);
233 return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
234 (u8 *)&cmd, sizeof(cmd));
235}
236
217static int iwlagn_send_calib_cfg(struct iwl_priv *priv) 237static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
218{ 238{
219 struct iwl_calib_cfg_cmd calib_cfg_cmd; 239 struct iwl_calib_cfg_cmd calib_cfg_cmd;
@@ -307,7 +327,27 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
307 goto restart; 327 goto restart;
308 } 328 }
309 329
330 if (priv->cfg->bt_params &&
331 priv->cfg->bt_params->advanced_bt_coexist) {
332 /*
333 * Tell uCode we are ready to perform calibration
334 * need to perform this before any calibration
335 * no need to close the envlope since we are going
336 * to load the runtime uCode later.
337 */
338 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
339 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
340
341 }
310 iwlagn_send_calib_cfg(priv); 342 iwlagn_send_calib_cfg(priv);
343
344 /**
345 * temperature offset calibration is only needed for runtime ucode,
346 * so prepare the value now.
347 */
348 if (priv->cfg->need_temp_offset_calib)
349 iwlagn_set_temperature_offset_calib(priv);
350
311 return; 351 return;
312 352
313restart: 353restart:
@@ -319,7 +359,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
319{ 359{
320 struct iwl_wimax_coex_cmd coex_cmd; 360 struct iwl_wimax_coex_cmd coex_cmd;
321 361
322 if (priv->cfg->support_wimax_coexist) { 362 if (priv->cfg->base_params->support_wimax_coexist) {
323 /* UnMask wake up src at associated sleep */ 363 /* UnMask wake up src at associated sleep */
324 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK; 364 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
325 365
@@ -364,7 +404,7 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
364 0, 0, 0, 0, 0, 0, 0 404 0, 0, 0, 0, 0, 0, 0
365}; 405};
366 406
367static void iwlagn_send_prio_tbl(struct iwl_priv *priv) 407void iwlagn_send_prio_tbl(struct iwl_priv *priv)
368{ 408{
369 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd; 409 struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
370 410
@@ -375,7 +415,7 @@ static void iwlagn_send_prio_tbl(struct iwl_priv *priv)
375 IWL_ERR(priv, "failed to send BT prio tbl command\n"); 415 IWL_ERR(priv, "failed to send BT prio tbl command\n");
376} 416}
377 417
378static void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type) 418void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
379{ 419{
380 struct iwl_bt_coex_prot_env_cmd env_cmd; 420 struct iwl_bt_coex_prot_env_cmd env_cmd;
381 421
@@ -482,25 +522,6 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
482 522
483 spin_unlock_irqrestore(&priv->lock, flags); 523 spin_unlock_irqrestore(&priv->lock, flags);
484 524
485 if (priv->cfg->advanced_bt_coexist) {
486 /* Configure Bluetooth device coexistence support */
487 /* need to perform this before any calibration */
488 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
489 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
490 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
491 priv->cfg->ops->hcmd->send_bt_config(priv);
492 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
493
494 if (bt_coex_active && priv->iw_mode != NL80211_IFTYPE_ADHOC) {
495 iwlagn_send_prio_tbl(priv);
496 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
497 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
498 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
499 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
500 }
501
502 }
503
504 iwlagn_send_wimax_coex(priv); 525 iwlagn_send_wimax_coex(priv);
505 526
506 iwlagn_set_Xtal_calib(priv); 527 iwlagn_set_Xtal_calib(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ad0e67f5c0d4..c2636a7ab9ee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -57,7 +57,7 @@
57#include "iwl-io.h" 57#include "iwl-io.h"
58#include "iwl-helpers.h" 58#include "iwl-helpers.h"
59#include "iwl-sta.h" 59#include "iwl-sta.h"
60#include "iwl-calib.h" 60#include "iwl-agn-calib.h"
61#include "iwl-agn.h" 61#include "iwl-agn.h"
62 62
63 63
@@ -91,14 +91,14 @@ static int iwlagn_ant_coupling;
91static bool iwlagn_bt_ch_announce = 1; 91static bool iwlagn_bt_ch_announce = 1;
92 92
93/** 93/**
94 * iwl_commit_rxon - commit staging_rxon to hardware 94 * iwlagn_commit_rxon - commit staging_rxon to hardware
95 * 95 *
96 * The RXON command in staging_rxon is committed to the hardware and 96 * The RXON command in staging_rxon is committed to the hardware and
97 * the active_rxon structure is updated with the new data. This 97 * the active_rxon structure is updated with the new data. This
98 * function correctly transitions out of the RXON_ASSOC_MSK state if 98 * function correctly transitions out of the RXON_ASSOC_MSK state if
99 * a HW tune is required based on the RXON structure changes. 99 * a HW tune is required based on the RXON structure changes.
100 */ 100 */
101int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 101int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
102{ 102{
103 /* cast away the const for active_rxon in this function */ 103 /* cast away the const for active_rxon in this function */
104 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active; 104 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
@@ -110,6 +110,9 @@ int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
110 if (!iwl_is_alive(priv)) 110 if (!iwl_is_alive(priv))
111 return -EBUSY; 111 return -EBUSY;
112 112
113 if (!ctx->is_active)
114 return 0;
115
113 /* always get timestamp with Rx frame */ 116 /* always get timestamp with Rx frame */
114 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 117 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
115 118
@@ -223,9 +226,8 @@ int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
223 return ret; 226 return ret;
224 } 227 }
225 } 228 }
226
227 priv->start_calib = 0;
228 if (new_assoc) { 229 if (new_assoc) {
230 priv->start_calib = 0;
229 /* Apply the new configuration 231 /* Apply the new configuration
230 * RXON assoc doesn't clear the station table in uCode, 232 * RXON assoc doesn't clear the station table in uCode,
231 */ 233 */
@@ -312,24 +314,26 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
312} 314}
313 315
314static u32 iwl_fill_beacon_frame(struct iwl_priv *priv, 316static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
315 struct ieee80211_hdr *hdr, 317 struct ieee80211_hdr *hdr,
316 int left) 318 int left)
317{ 319{
318 if (!priv->ibss_beacon) 320 lockdep_assert_held(&priv->mutex);
321
322 if (!priv->beacon_skb)
319 return 0; 323 return 0;
320 324
321 if (priv->ibss_beacon->len > left) 325 if (priv->beacon_skb->len > left)
322 return 0; 326 return 0;
323 327
324 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); 328 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
325 329
326 return priv->ibss_beacon->len; 330 return priv->beacon_skb->len;
327} 331}
328 332
329/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ 333/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
330static void iwl_set_beacon_tim(struct iwl_priv *priv, 334static void iwl_set_beacon_tim(struct iwl_priv *priv,
331 struct iwl_tx_beacon_cmd *tx_beacon_cmd, 335 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
332 u8 *beacon, u32 frame_size) 336 u8 *beacon, u32 frame_size)
333{ 337{
334 u16 tim_idx; 338 u16 tim_idx;
335 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; 339 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
@@ -369,7 +373,7 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
369 373
370 if (!priv->beacon_ctx) { 374 if (!priv->beacon_ctx) {
371 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); 375 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
372 return -EINVAL; 376 return 0;
373 } 377 }
374 378
375 /* Initialize memory */ 379 /* Initialize memory */
@@ -381,6 +385,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
381 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 385 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
382 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) 386 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
383 return 0; 387 return 0;
388 if (!frame_size)
389 return 0;
384 390
385 /* Set up TX command fields */ 391 /* Set up TX command fields */
386 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); 392 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
@@ -391,7 +397,7 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
391 397
392 /* Set up TX beacon command fields */ 398 /* Set up TX beacon command fields */
393 iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, 399 iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
394 frame_size); 400 frame_size);
395 401
396 /* Set up packet rate and flags */ 402 /* Set up packet rate and flags */
397 rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx); 403 rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
@@ -646,15 +652,14 @@ static void iwl_bg_beacon_update(struct work_struct *work)
646 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 652 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
647 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif); 653 beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
648 if (!beacon) { 654 if (!beacon) {
649 IWL_ERR(priv, "update beacon failed\n"); 655 IWL_ERR(priv, "update beacon failed -- keeping old\n");
650 goto out; 656 goto out;
651 } 657 }
652 658
653 /* new beacon skb is allocated every time; dispose previous.*/ 659 /* new beacon skb is allocated every time; dispose previous.*/
654 if (priv->ibss_beacon) 660 dev_kfree_skb(priv->beacon_skb);
655 dev_kfree_skb(priv->ibss_beacon);
656 661
657 priv->ibss_beacon = beacon; 662 priv->beacon_skb = beacon;
658 663
659 iwl_send_beacon_cmd(priv); 664 iwl_send_beacon_cmd(priv);
660 out: 665 out:
@@ -933,22 +938,6 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
933 wake_up_interruptible(&priv->wait_command_queue); 938 wake_up_interruptible(&priv->wait_command_queue);
934} 939}
935 940
936int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
937{
938 if (src == IWL_PWR_SRC_VAUX) {
939 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
940 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
941 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
942 ~APMG_PS_CTRL_MSK_PWR_SRC);
943 } else {
944 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
945 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
946 ~APMG_PS_CTRL_MSK_PWR_SRC);
947 }
948
949 return 0;
950}
951
952static void iwl_bg_tx_flush(struct work_struct *work) 941static void iwl_bg_tx_flush(struct work_struct *work)
953{ 942{
954 struct iwl_priv *priv = 943 struct iwl_priv *priv =
@@ -1278,7 +1267,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1278 IWL_ERR(priv, "Microcode SW error detected. " 1267 IWL_ERR(priv, "Microcode SW error detected. "
1279 " Restarting 0x%X.\n", inta); 1268 " Restarting 0x%X.\n", inta);
1280 priv->isr_stats.sw++; 1269 priv->isr_stats.sw++;
1281 priv->isr_stats.sw_err = inta;
1282 iwl_irq_handle_error(priv); 1270 iwl_irq_handle_error(priv);
1283 handled |= CSR_INT_BIT_SW_ERR; 1271 handled |= CSR_INT_BIT_SW_ERR;
1284 } 1272 }
@@ -1459,7 +1447,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1459 IWL_ERR(priv, "Microcode SW error detected. " 1447 IWL_ERR(priv, "Microcode SW error detected. "
1460 " Restarting 0x%X.\n", inta); 1448 " Restarting 0x%X.\n", inta);
1461 priv->isr_stats.sw++; 1449 priv->isr_stats.sw++;
1462 priv->isr_stats.sw_err = inta;
1463 iwl_irq_handle_error(priv); 1450 iwl_irq_handle_error(priv);
1464 handled |= CSR_INT_BIT_SW_ERR; 1451 handled |= CSR_INT_BIT_SW_ERR;
1465 } 1452 }
@@ -2078,7 +2065,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2078 struct iwlagn_ucode_capabilities ucode_capa = { 2065 struct iwlagn_ucode_capabilities ucode_capa = {
2079 .max_probe_length = 200, 2066 .max_probe_length = 200,
2080 .standard_phy_calibration_size = 2067 .standard_phy_calibration_size =
2081 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE, 2068 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
2082 }; 2069 };
2083 2070
2084 memset(&pieces, 0, sizeof(pieces)); 2071 memset(&pieces, 0, sizeof(pieces));
@@ -2120,18 +2107,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2120 * firmware filename ... but we don't check for that and only rely 2107 * firmware filename ... but we don't check for that and only rely
2121 * on the API version read from firmware header from here on forward 2108 * on the API version read from firmware header from here on forward
2122 */ 2109 */
2123 if (api_ver < api_min || api_ver > api_max) { 2110 /* no api version check required for experimental uCode */
2124 IWL_ERR(priv, "Driver unable to support your firmware API. " 2111 if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
2125 "Driver supports v%u, firmware is v%u.\n", 2112 if (api_ver < api_min || api_ver > api_max) {
2126 api_max, api_ver); 2113 IWL_ERR(priv,
2127 goto try_again; 2114 "Driver unable to support your firmware API. "
2128 } 2115 "Driver supports v%u, firmware is v%u.\n",
2116 api_max, api_ver);
2117 goto try_again;
2118 }
2129 2119
2130 if (api_ver != api_max) 2120 if (api_ver != api_max)
2131 IWL_ERR(priv, "Firmware has old API version. Expected v%u, " 2121 IWL_ERR(priv,
2132 "got v%u. New firmware can be obtained " 2122 "Firmware has old API version. Expected v%u, "
2133 "from http://www.intellinuxwireless.org.\n", 2123 "got v%u. New firmware can be obtained "
2134 api_max, api_ver); 2124 "from http://www.intellinuxwireless.org.\n",
2125 api_max, api_ver);
2126 }
2135 2127
2136 if (build) 2128 if (build)
2137 sprintf(buildstr, " build %u%s", build, 2129 sprintf(buildstr, " build %u%s", build,
@@ -2256,13 +2248,15 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
2256 if (pieces.init_evtlog_size) 2248 if (pieces.init_evtlog_size)
2257 priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12; 2249 priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
2258 else 2250 else
2259 priv->_agn.init_evtlog_size = priv->cfg->max_event_log_size; 2251 priv->_agn.init_evtlog_size =
2252 priv->cfg->base_params->max_event_log_size;
2260 priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr; 2253 priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
2261 priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr; 2254 priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
2262 if (pieces.inst_evtlog_size) 2255 if (pieces.inst_evtlog_size)
2263 priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; 2256 priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
2264 else 2257 else
2265 priv->_agn.inst_evtlog_size = priv->cfg->max_event_log_size; 2258 priv->_agn.inst_evtlog_size =
2259 priv->cfg->base_params->max_event_log_size;
2266 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr; 2260 priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
2267 2261
2268 if (ucode_capa.pan) { 2262 if (ucode_capa.pan) {
@@ -2467,6 +2461,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
2467 } 2461 }
2468 2462
2469 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 2463 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
2464 priv->isr_stats.err_code = desc;
2470 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32)); 2465 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
2471 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); 2466 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
2472 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); 2467 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
@@ -2731,7 +2726,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2731 spin_unlock_irqrestore(&priv->lock, flags); 2726 spin_unlock_irqrestore(&priv->lock, flags);
2732 priv->thermal_throttle.ct_kill_toggle = false; 2727 priv->thermal_throttle.ct_kill_toggle = false;
2733 2728
2734 if (priv->cfg->support_ct_kill_exit) { 2729 if (priv->cfg->base_params->support_ct_kill_exit) {
2735 adv_cmd.critical_temperature_enter = 2730 adv_cmd.critical_temperature_enter =
2736 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2731 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2737 adv_cmd.critical_temperature_exit = 2732 adv_cmd.critical_temperature_exit =
@@ -2764,6 +2759,23 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2764 } 2759 }
2765} 2760}
2766 2761
2762static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
2763{
2764 struct iwl_calib_cfg_cmd calib_cfg_cmd;
2765 struct iwl_host_cmd cmd = {
2766 .id = CALIBRATION_CFG_CMD,
2767 .len = sizeof(struct iwl_calib_cfg_cmd),
2768 .data = &calib_cfg_cmd,
2769 };
2770
2771 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
2772 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
2773 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
2774
2775 return iwl_send_cmd(priv, &cmd);
2776}
2777
2778
2767/** 2779/**
2768 * iwl_alive_start - called after REPLY_ALIVE notification received 2780 * iwl_alive_start - called after REPLY_ALIVE notification received
2769 * from protocol/runtime uCode (initialization uCode's 2781 * from protocol/runtime uCode (initialization uCode's
@@ -2800,6 +2812,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2800 goto restart; 2812 goto restart;
2801 } 2813 }
2802 2814
2815
2803 /* After the ALIVE response, we can send host commands to the uCode */ 2816 /* After the ALIVE response, we can send host commands to the uCode */
2804 set_bit(STATUS_ALIVE, &priv->status); 2817 set_bit(STATUS_ALIVE, &priv->status);
2805 2818
@@ -2807,12 +2820,33 @@ static void iwl_alive_start(struct iwl_priv *priv)
2807 /* Enable timer to monitor the driver queues */ 2820 /* Enable timer to monitor the driver queues */
2808 mod_timer(&priv->monitor_recover, 2821 mod_timer(&priv->monitor_recover,
2809 jiffies + 2822 jiffies +
2810 msecs_to_jiffies(priv->cfg->monitor_recover_period)); 2823 msecs_to_jiffies(
2824 priv->cfg->base_params->monitor_recover_period));
2811 } 2825 }
2812 2826
2813 if (iwl_is_rfkill(priv)) 2827 if (iwl_is_rfkill(priv))
2814 return; 2828 return;
2815 2829
2830 /* download priority table before any calibration request */
2831 if (priv->cfg->bt_params &&
2832 priv->cfg->bt_params->advanced_bt_coexist) {
2833 /* Configure Bluetooth device coexistence support */
2834 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
2835 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
2836 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
2837 priv->cfg->ops->hcmd->send_bt_config(priv);
2838 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
2839 iwlagn_send_prio_tbl(priv);
2840
2841 /* FIXME: w/a to force change uCode BT state machine */
2842 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
2843 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2844 iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
2845 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2846 }
2847 if (priv->hw_params.calib_rt_cfg)
2848 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
2849
2816 ieee80211_wake_queues(priv->hw); 2850 ieee80211_wake_queues(priv->hw);
2817 2851
2818 priv->active_rate = IWL_RATES_MASK; 2852 priv->active_rate = IWL_RATES_MASK;
@@ -2837,7 +2871,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
2837 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 2871 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2838 } 2872 }
2839 2873
2840 if (!priv->cfg->advanced_bt_coexist) { 2874 if (priv->cfg->bt_params &&
2875 !priv->cfg->bt_params->advanced_bt_coexist) {
2841 /* Configure Bluetooth device coexistence support */ 2876 /* Configure Bluetooth device coexistence support */
2842 priv->cfg->ops->hcmd->send_bt_config(priv); 2877 priv->cfg->ops->hcmd->send_bt_config(priv);
2843 } 2878 }
@@ -2875,8 +2910,9 @@ static void __iwl_down(struct iwl_priv *priv)
2875 2910
2876 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2911 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2877 2912
2878 if (!exit_pending) 2913 iwl_scan_cancel_timeout(priv, 200);
2879 set_bit(STATUS_EXIT_PENDING, &priv->status); 2914
2915 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2880 2916
2881 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set 2917 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
2882 * to prevent rearm timer */ 2918 * to prevent rearm timer */
@@ -2889,7 +2925,11 @@ static void __iwl_down(struct iwl_priv *priv)
2889 2925
2890 /* reset BT coex data */ 2926 /* reset BT coex data */
2891 priv->bt_status = 0; 2927 priv->bt_status = 0;
2892 priv->bt_traffic_load = priv->cfg->bt_init_traffic_load; 2928 if (priv->cfg->bt_params)
2929 priv->bt_traffic_load =
2930 priv->cfg->bt_params->bt_init_traffic_load;
2931 else
2932 priv->bt_traffic_load = 0;
2893 priv->bt_sco_active = false; 2933 priv->bt_sco_active = false;
2894 priv->bt_full_concurrent = false; 2934 priv->bt_full_concurrent = false;
2895 priv->bt_ci_compliance = 0; 2935 priv->bt_ci_compliance = 0;
@@ -2951,14 +2991,13 @@ static void __iwl_down(struct iwl_priv *priv)
2951 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2991 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2952 2992
2953 /* Stop the device, and put it in low power state */ 2993 /* Stop the device, and put it in low power state */
2954 priv->cfg->ops->lib->apm_ops.stop(priv); 2994 iwl_apm_stop(priv);
2955 2995
2956 exit: 2996 exit:
2957 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2997 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2958 2998
2959 if (priv->ibss_beacon) 2999 dev_kfree_skb(priv->beacon_skb);
2960 dev_kfree_skb(priv->ibss_beacon); 3000 priv->beacon_skb = NULL;
2961 priv->ibss_beacon = NULL;
2962 3001
2963 /* clear out any free frames */ 3002 /* clear out any free frames */
2964 iwl_clear_free_frames(priv); 3003 iwl_clear_free_frames(priv);
@@ -3041,7 +3080,7 @@ static int __iwl_up(struct iwl_priv *priv)
3041 } 3080 }
3042 3081
3043 for_each_context(priv, ctx) { 3082 for_each_context(priv, ctx) {
3044 ret = iwl_alloc_bcast_station(priv, ctx, true); 3083 ret = iwlagn_alloc_bcast_station(priv, ctx);
3045 if (ret) { 3084 if (ret) {
3046 iwl_dealloc_bcast_stations(priv); 3085 iwl_dealloc_bcast_stations(priv);
3047 return ret; 3086 return ret;
@@ -3183,7 +3222,8 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
3183 } 3222 }
3184 3223
3185 if (priv->start_calib) { 3224 if (priv->start_calib) {
3186 if (priv->cfg->bt_statistics) { 3225 if (priv->cfg->bt_params &&
3226 priv->cfg->bt_params->bt_statistics) {
3187 iwl_chain_noise_calibration(priv, 3227 iwl_chain_noise_calibration(priv,
3188 (void *)&priv->_agn.statistics_bt); 3228 (void *)&priv->_agn.statistics_bt);
3189 iwl_sensitivity_calibration(priv, 3229 iwl_sensitivity_calibration(priv,
@@ -3382,7 +3422,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3382 IEEE80211_HW_NEED_DTIM_PERIOD | 3422 IEEE80211_HW_NEED_DTIM_PERIOD |
3383 IEEE80211_HW_SPECTRUM_MGMT; 3423 IEEE80211_HW_SPECTRUM_MGMT;
3384 3424
3385 if (!priv->cfg->broken_powersave) 3425 if (!priv->cfg->base_params->broken_powersave)
3386 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 3426 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3387 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3427 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3388 3428
@@ -3486,15 +3526,6 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
3486 3526
3487 priv->is_open = 0; 3527 priv->is_open = 0;
3488 3528
3489 if (iwl_is_ready_rf(priv) || test_bit(STATUS_SCAN_HW, &priv->status)) {
3490 /* stop mac, cancel any scan request and clear
3491 * RXON_FILTER_ASSOC_MSK BIT
3492 */
3493 mutex_lock(&priv->mutex);
3494 iwl_scan_cancel_timeout(priv, 100);
3495 mutex_unlock(&priv->mutex);
3496 }
3497
3498 iwl_down(priv); 3529 iwl_down(priv);
3499 3530
3500 flush_workqueue(priv->workqueue); 3531 flush_workqueue(priv->workqueue);
@@ -3716,7 +3747,8 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3716 } 3747 }
3717 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3748 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3718 ret = 0; 3749 ret = 0;
3719 if (priv->cfg->use_rts_for_aggregation) { 3750 if (priv->cfg->ht_params &&
3751 priv->cfg->ht_params->use_rts_for_aggregation) {
3720 struct iwl_station_priv *sta_priv = 3752 struct iwl_station_priv *sta_priv =
3721 (void *) sta->drv_priv; 3753 (void *) sta->drv_priv;
3722 /* 3754 /*
@@ -3730,7 +3762,8 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3730 } 3762 }
3731 break; 3763 break;
3732 case IEEE80211_AMPDU_TX_OPERATIONAL: 3764 case IEEE80211_AMPDU_TX_OPERATIONAL:
3733 if (priv->cfg->use_rts_for_aggregation) { 3765 if (priv->cfg->ht_params &&
3766 priv->cfg->ht_params->use_rts_for_aggregation) {
3734 struct iwl_station_priv *sta_priv = 3767 struct iwl_station_priv *sta_priv =
3735 (void *) sta->drv_priv; 3768 (void *) sta->drv_priv;
3736 3769
@@ -4048,7 +4081,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
4048 priv->cfg->ops->lib->recover_from_tx_stall; 4081 priv->cfg->ops->lib->recover_from_tx_stall;
4049 } 4082 }
4050 4083
4051 if (!priv->cfg->use_isr_legacy) 4084 if (!priv->cfg->base_params->use_isr_legacy)
4052 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 4085 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
4053 iwl_irq_tasklet, (unsigned long)priv); 4086 iwl_irq_tasklet, (unsigned long)priv);
4054 else 4087 else
@@ -4062,13 +4095,15 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
4062 priv->cfg->ops->lib->cancel_deferred_work(priv); 4095 priv->cfg->ops->lib->cancel_deferred_work(priv);
4063 4096
4064 cancel_delayed_work_sync(&priv->init_alive_start); 4097 cancel_delayed_work_sync(&priv->init_alive_start);
4065 cancel_delayed_work(&priv->scan_check);
4066 cancel_work_sync(&priv->start_internal_scan);
4067 cancel_delayed_work(&priv->alive_start); 4098 cancel_delayed_work(&priv->alive_start);
4068 cancel_work_sync(&priv->run_time_calib_work); 4099 cancel_work_sync(&priv->run_time_calib_work);
4069 cancel_work_sync(&priv->beacon_update); 4100 cancel_work_sync(&priv->beacon_update);
4101
4102 iwl_cancel_scan_deferred_work(priv);
4103
4070 cancel_work_sync(&priv->bt_full_concurrency); 4104 cancel_work_sync(&priv->bt_full_concurrency);
4071 cancel_work_sync(&priv->bt_runtime_config); 4105 cancel_work_sync(&priv->bt_runtime_config);
4106
4072 del_timer_sync(&priv->statistics_periodic); 4107 del_timer_sync(&priv->statistics_periodic);
4073 del_timer_sync(&priv->ucode_trace); 4108 del_timer_sync(&priv->ucode_trace);
4074} 4109}
@@ -4098,8 +4133,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
4098{ 4133{
4099 int ret; 4134 int ret;
4100 4135
4101 priv->ibss_beacon = NULL;
4102
4103 spin_lock_init(&priv->sta_lock); 4136 spin_lock_init(&priv->sta_lock);
4104 spin_lock_init(&priv->hcmd_lock); 4137 spin_lock_init(&priv->hcmd_lock);
4105 4138
@@ -4131,7 +4164,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
4131 iwl_init_scan_params(priv); 4164 iwl_init_scan_params(priv);
4132 4165
4133 /* init bt coex */ 4166 /* init bt coex */
4134 if (priv->cfg->advanced_bt_coexist) { 4167 if (priv->cfg->bt_params &&
4168 priv->cfg->bt_params->advanced_bt_coexist) {
4135 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; 4169 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
4136 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; 4170 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
4137 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; 4171 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -4262,9 +4296,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4262 /* Disabling hardware scan means that mac80211 will perform scans 4296 /* Disabling hardware scan means that mac80211 will perform scans
4263 * "the hard way", rather than using device's scan. */ 4297 * "the hard way", rather than using device's scan. */
4264 if (cfg->mod_params->disable_hw_scan) { 4298 if (cfg->mod_params->disable_hw_scan) {
4265 if (iwl_debug_level & IWL_DL_INFO) 4299 dev_printk(KERN_DEBUG, &(pdev->dev),
4266 dev_printk(KERN_DEBUG, &(pdev->dev), 4300 "sw scan support is deprecated\n");
4267 "Disabling hw_scan\n");
4268 iwl_hw_ops.hw_scan = NULL; 4301 iwl_hw_ops.hw_scan = NULL;
4269 } 4302 }
4270 4303
@@ -4286,6 +4319,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4286 for (i = 0; i < NUM_IWL_RXON_CTX; i++) 4319 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
4287 priv->contexts[i].ctxid = i; 4320 priv->contexts[i].ctxid = i;
4288 4321
4322 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
4323 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
4289 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; 4324 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
4290 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; 4325 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
4291 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; 4326 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
@@ -4567,7 +4602,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4567 * paths to avoid running iwl_down() at all before leaving driver. 4602 * paths to avoid running iwl_down() at all before leaving driver.
4568 * This (inexpensive) call *makes sure* device is reset. 4603 * This (inexpensive) call *makes sure* device is reset.
4569 */ 4604 */
4570 priv->cfg->ops->lib->apm_ops.stop(priv); 4605 iwl_apm_stop(priv);
4571 4606
4572 iwl_tt_exit(priv); 4607 iwl_tt_exit(priv);
4573 4608
@@ -4610,8 +4645,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4610 4645
4611 iwl_free_isr_ict(priv); 4646 iwl_free_isr_ict(priv);
4612 4647
4613 if (priv->ibss_beacon) 4648 dev_kfree_skb(priv->beacon_skb);
4614 dev_kfree_skb(priv->ibss_beacon);
4615 4649
4616 ieee80211_free_hw(priv->hw); 4650 ieee80211_free_hw(priv->hw);
4617} 4651}
@@ -4775,6 +4809,22 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4775 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)}, 4809 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
4776 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)}, 4810 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
4777 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)}, 4811 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
4812
4813/* 100 Series WiFi */
4814 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
4815 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
4816 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
4817 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
4818 {IWL_PCI_DEVICE(0x08AE, 0x1017, iwl100_bg_cfg)},
4819
4820/* 130 Series WiFi */
4821 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
4822 {IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
4823 {IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
4824 {IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
4825 {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
4826 {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
4827
4778#endif /* CONFIG_IWL5000 */ 4828#endif /* CONFIG_IWL5000 */
4779 4829
4780 {0} 4830 {0}
@@ -4863,7 +4913,8 @@ module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
4863MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 4913MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4864module_param_named( 4914module_param_named(
4865 disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO); 4915 disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
4866MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 4916MODULE_PARM_DESC(disable_hw_scan,
4917 "disable hardware scanning (default 0) (deprecated)");
4867 4918
4868module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int, 4919module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
4869 S_IRUGO); 4920 S_IRUGO);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 7c542a8c8f81..f525d55f2c0f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -92,6 +92,10 @@ extern struct iwl_cfg iwl6050_2abg_cfg;
92extern struct iwl_cfg iwl6050g2_bgn_cfg; 92extern struct iwl_cfg iwl6050g2_bgn_cfg;
93extern struct iwl_cfg iwl1000_bgn_cfg; 93extern struct iwl_cfg iwl1000_bgn_cfg;
94extern struct iwl_cfg iwl1000_bg_cfg; 94extern struct iwl_cfg iwl1000_bg_cfg;
95extern struct iwl_cfg iwl100_bgn_cfg;
96extern struct iwl_cfg iwl100_bg_cfg;
97extern struct iwl_cfg iwl130_bgn_cfg;
98extern struct iwl_cfg iwl130_bg_cfg;
95 99
96extern struct iwl_mod_params iwlagn_mod_params; 100extern struct iwl_mod_params iwlagn_mod_params;
97extern struct iwl_hcmd_ops iwlagn_hcmd; 101extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -125,6 +129,10 @@ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
125void iwl_free_tfds_in_queue(struct iwl_priv *priv, 129void iwl_free_tfds_in_queue(struct iwl_priv *priv,
126 int sta_id, int tid, int freed); 130 int sta_id, int tid, int freed);
127 131
132/* RXON */
133int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
134void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
135
128/* uCode */ 136/* uCode */
129int iwlagn_load_ucode(struct iwl_priv *priv); 137int iwlagn_load_ucode(struct iwl_priv *priv);
130void iwlagn_rx_calib_result(struct iwl_priv *priv, 138void iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -134,6 +142,8 @@ void iwlagn_rx_calib_complete(struct iwl_priv *priv,
134void iwlagn_init_alive_start(struct iwl_priv *priv); 142void iwlagn_init_alive_start(struct iwl_priv *priv);
135int iwlagn_alive_notify(struct iwl_priv *priv); 143int iwlagn_alive_notify(struct iwl_priv *priv);
136int iwl_verify_ucode(struct iwl_priv *priv); 144int iwl_verify_ucode(struct iwl_priv *priv);
145void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
146void iwlagn_send_prio_tbl(struct iwl_priv *priv);
137 147
138/* lib */ 148/* lib */
139void iwl_check_abort_status(struct iwl_priv *priv, 149void iwl_check_abort_status(struct iwl_priv *priv,
@@ -152,6 +162,8 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv);
152int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv); 162int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
153int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 163int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
154void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 164void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
165void iwl_dump_csr(struct iwl_priv *priv);
166int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
155 167
156/* rx */ 168/* rx */
157void iwlagn_rx_queue_restock(struct iwl_priv *priv); 169void iwlagn_rx_queue_restock(struct iwl_priv *priv);
@@ -165,8 +177,15 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
165 struct iwl_rx_mem_buffer *rxb); 177 struct iwl_rx_mem_buffer *rxb);
166void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, 178void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
167 struct iwl_rx_mem_buffer *rxb); 179 struct iwl_rx_mem_buffer *rxb);
180void iwl_rx_handle(struct iwl_priv *priv);
168 181
169/* tx */ 182/* tx */
183void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
184int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
185 struct iwl_tx_queue *txq,
186 dma_addr_t addr, u16 len, u8 reset, u8 pad);
187int iwl_hw_tx_queue_init(struct iwl_priv *priv,
188 struct iwl_tx_queue *txq);
170void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 189void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
171 struct ieee80211_tx_info *info); 190 struct ieee80211_tx_info *info);
172int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 191int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -206,6 +225,8 @@ static inline bool iwl_is_tx_success(u32 status)
206 (status == TX_STATUS_DIRECT_DONE); 225 (status == TX_STATUS_DIRECT_DONE);
207} 226}
208 227
228u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
229
209/* rx */ 230/* rx */
210void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 231void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
211 struct iwl_rx_mem_buffer *rxb); 232 struct iwl_rx_mem_buffer *rxb);
@@ -217,7 +238,8 @@ void iwl_reply_statistics(struct iwl_priv *priv,
217 struct iwl_rx_mem_buffer *rxb); 238 struct iwl_rx_mem_buffer *rxb);
218 239
219/* scan */ 240/* scan */
220void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); 241int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
242void iwlagn_post_scan(struct iwl_priv *priv);
221 243
222/* station mgmt */ 244/* station mgmt */
223int iwlagn_manage_ibss_station(struct iwl_priv *priv, 245int iwlagn_manage_ibss_station(struct iwl_priv *priv,
@@ -236,4 +258,64 @@ void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
236void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv); 258void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
237void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv); 259void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
238 260
261#ifdef CONFIG_IWLWIFI_DEBUG
262const char *iwl_get_tx_fail_reason(u32 status);
263const char *iwl_get_agg_tx_fail_reason(u16 status);
264#else
265static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
266static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
267#endif
268
269/* station management */
270int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
271 struct iwl_rxon_context *ctx);
272int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
273 const u8 *addr, u8 *sta_id_r);
274int iwl_remove_default_wep_key(struct iwl_priv *priv,
275 struct iwl_rxon_context *ctx,
276 struct ieee80211_key_conf *key);
277int iwl_set_default_wep_key(struct iwl_priv *priv,
278 struct iwl_rxon_context *ctx,
279 struct ieee80211_key_conf *key);
280int iwl_restore_default_wep_keys(struct iwl_priv *priv,
281 struct iwl_rxon_context *ctx);
282int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
283 struct ieee80211_key_conf *key, u8 sta_id);
284int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
285 struct ieee80211_key_conf *key, u8 sta_id);
286void iwl_update_tkip_key(struct iwl_priv *priv,
287 struct iwl_rxon_context *ctx,
288 struct ieee80211_key_conf *keyconf,
289 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
290int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
291int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
292 int tid, u16 ssn);
293int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
294 int tid);
295void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
296void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
297int iwl_update_bcast_stations(struct iwl_priv *priv);
298
299/* rate */
300static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
301{
302 return BIT(ant_idx) << RATE_MCS_ANT_POS;
303}
304
305static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
306{
307 return le32_to_cpu(rate_n_flags) & 0xFF;
308}
309
310static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
311{
312 return cpu_to_le32(flags|(u32)rate);
313}
314
315/* eeprom */
316void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
317void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
318int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
319void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
320
239#endif /* __iwl_agn_h__ */ 321#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 3e4ba31b5d59..424801abc80e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -420,12 +420,12 @@ struct iwl4965_tx_power_db {
420 420
421/** 421/**
422 * Command REPLY_TX_POWER_DBM_CMD = 0x98 422 * Command REPLY_TX_POWER_DBM_CMD = 0x98
423 * struct iwl5000_tx_power_dbm_cmd 423 * struct iwlagn_tx_power_dbm_cmd
424 */ 424 */
425#define IWL50_TX_POWER_AUTO 0x7f 425#define IWLAGN_TX_POWER_AUTO 0x7f
426#define IWL50_TX_POWER_NO_CLOSED (0x1 << 6) 426#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6)
427 427
428struct iwl5000_tx_power_dbm_cmd { 428struct iwlagn_tx_power_dbm_cmd {
429 s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ 429 s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
430 u8 flags; 430 u8 flags;
431 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */ 431 s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
@@ -1042,7 +1042,7 @@ struct iwl4965_keyinfo {
1042 u8 key[16]; /* 16-byte unicast decryption key */ 1042 u8 key[16]; /* 16-byte unicast decryption key */
1043} __packed; 1043} __packed;
1044 1044
1045/* 5000 */ 1045/* agn */
1046struct iwl_keyinfo { 1046struct iwl_keyinfo {
1047 __le16 key_flags; 1047 __le16 key_flags;
1048 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ 1048 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
@@ -1168,7 +1168,7 @@ struct iwl4965_addsta_cmd {
1168 __le16 reserved2; 1168 __le16 reserved2;
1169} __packed; 1169} __packed;
1170 1170
1171/* 5000 */ 1171/* agn */
1172struct iwl_addsta_cmd { 1172struct iwl_addsta_cmd {
1173 u8 mode; /* 1: modify existing, 0: add new station */ 1173 u8 mode; /* 1: modify existing, 0: add new station */
1174 u8 reserved[3]; 1174 u8 reserved[3];
@@ -1820,13 +1820,8 @@ enum {
1820 TX_STATUS_FAIL_TID_DISABLE = 0x8d, 1820 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1821 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, 1821 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1822 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, 1822 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1823 /* uCode drop due to FW drop request */ 1823 TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
1824 TX_STATUS_FAIL_FW_DROP = 0x90, 1824 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1825 /*
1826 * uCode drop due to station color mismatch
1827 * between tx command and station table
1828 */
1829 TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
1830}; 1825};
1831 1826
1832#define TX_PACKET_MODE_REGULAR 0x0000 1827#define TX_PACKET_MODE_REGULAR 0x0000
@@ -1868,6 +1863,9 @@ enum {
1868 AGG_TX_STATE_DELAY_TX_MSK = 0x400 1863 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1869}; 1864};
1870 1865
1866#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */
1867#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */
1868
1871#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ 1869#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1872 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ 1870 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
1873 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK) 1871 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
@@ -1961,12 +1959,12 @@ struct iwl4965_tx_resp {
1961#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 1959#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80
1962 1960
1963/* refer to ra_tid */ 1961/* refer to ra_tid */
1964#define IWL50_TX_RES_TID_POS 0 1962#define IWLAGN_TX_RES_TID_POS 0
1965#define IWL50_TX_RES_TID_MSK 0x0f 1963#define IWLAGN_TX_RES_TID_MSK 0x0f
1966#define IWL50_TX_RES_RA_POS 4 1964#define IWLAGN_TX_RES_RA_POS 4
1967#define IWL50_TX_RES_RA_MSK 0xf0 1965#define IWLAGN_TX_RES_RA_MSK 0xf0
1968 1966
1969struct iwl5000_tx_resp { 1967struct iwlagn_tx_resp {
1970 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1968 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1971 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1969 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1972 u8 failure_rts; /* # failures due to unsuccessful RTS */ 1970 u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -2488,7 +2486,12 @@ struct iwlagn_bt_cmd {
2488 __le16 bt4_decision_time; /* unused */ 2486 __le16 bt4_decision_time; /* unused */
2489 __le16 valid; 2487 __le16 valid;
2490 u8 prio_boost; 2488 u8 prio_boost;
2491 u8 reserved[3]; 2489 /*
2490 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
2491 * if configure the following patterns
2492 */
2493 u8 tx_prio_boost; /* SW boost of WiFi tx priority */
2494 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
2492}; 2495};
2493 2496
2494#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0)) 2497#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
@@ -3781,7 +3784,8 @@ struct iwl_enhance_sensitivity_cmd {
3781 */ 3784 */
3782 3785
3783/* Phy calibration command for series */ 3786/* Phy calibration command for series */
3784 3787/* The default calibrate table size if not specified by firmware */
3788#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3785enum { 3789enum {
3786 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, 3790 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3787 IWL_PHY_CALIBRATE_DC_CMD = 8, 3791 IWL_PHY_CALIBRATE_DC_CMD = 8,
@@ -3790,13 +3794,29 @@ enum {
3790 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15, 3794 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
3791 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16, 3795 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
3792 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17, 3796 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
3793 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 18, 3797 IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD = 18,
3798 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3794}; 3799};
3795 3800
3796#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) 3801#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3797 3802
3798#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff) 3803#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff)
3799 3804
3805/* This enum defines the bitmap of various calibrations to enable in both
3806 * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
3807 */
3808enum iwl_ucode_calib_cfg {
3809 IWL_CALIB_CFG_RX_BB_IDX,
3810 IWL_CALIB_CFG_DC_IDX,
3811 IWL_CALIB_CFG_TX_IQ_IDX,
3812 IWL_CALIB_CFG_RX_IQ_IDX,
3813 IWL_CALIB_CFG_NOISE_IDX,
3814 IWL_CALIB_CFG_CRYSTAL_IDX,
3815 IWL_CALIB_CFG_TEMPERATURE_IDX,
3816 IWL_CALIB_CFG_PAPD_IDX,
3817};
3818
3819
3800struct iwl_calib_cfg_elmnt_s { 3820struct iwl_calib_cfg_elmnt_s {
3801 __le32 is_enable; 3821 __le32 is_enable;
3802 __le32 start; 3822 __le32 start;
@@ -3845,6 +3865,13 @@ struct iwl_calib_xtal_freq_cmd {
3845 u8 pad[2]; 3865 u8 pad[2];
3846} __packed; 3866} __packed;
3847 3867
3868#define DEFAULT_RADIO_SENSOR_OFFSET 2700
3869struct iwl_calib_temperature_offset_cmd {
3870 struct iwl_calib_hdr hdr;
3871 s16 radio_sensor_offset;
3872 s16 reserved;
3873} __packed;
3874
3848/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ 3875/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
3849struct iwl_calib_chain_noise_reset_cmd { 3876struct iwl_calib_chain_noise_reset_cmd {
3850 struct iwl_calib_hdr hdr; 3877 struct iwl_calib_hdr hdr;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 87a2e40972ba..25fb3912342c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -69,97 +69,9 @@ EXPORT_SYMBOL_GPL(bt_coex_active);
69module_param(bt_coex_active, bool, S_IRUGO); 69module_param(bt_coex_active, bool, S_IRUGO);
70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); 70MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
71 71
72#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
73 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
74 IWL_RATE_SISO_##s##M_PLCP, \
75 IWL_RATE_MIMO2_##s##M_PLCP,\
76 IWL_RATE_MIMO3_##s##M_PLCP,\
77 IWL_RATE_##r##M_IEEE, \
78 IWL_RATE_##ip##M_INDEX, \
79 IWL_RATE_##in##M_INDEX, \
80 IWL_RATE_##rp##M_INDEX, \
81 IWL_RATE_##rn##M_INDEX, \
82 IWL_RATE_##pp##M_INDEX, \
83 IWL_RATE_##np##M_INDEX }
84
85u32 iwl_debug_level; 72u32 iwl_debug_level;
86EXPORT_SYMBOL(iwl_debug_level); 73EXPORT_SYMBOL(iwl_debug_level);
87 74
88/*
89 * Parameter order:
90 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
91 *
92 * If there isn't a valid next or previous rate then INV is used which
93 * maps to IWL_RATE_INVALID
94 *
95 */
96const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
97 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
98 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
99 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
100 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
101 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
102 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
103 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
104 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
105 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
106 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
107 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
108 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
109 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
110 /* FIXME:RS: ^^ should be INV (legacy) */
111};
112EXPORT_SYMBOL(iwl_rates);
113
114int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
115{
116 int idx = 0;
117
118 /* HT rate format */
119 if (rate_n_flags & RATE_MCS_HT_MSK) {
120 idx = (rate_n_flags & 0xff);
121
122 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
123 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
124 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
125 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
126
127 idx += IWL_FIRST_OFDM_RATE;
128 /* skip 9M not supported in ht*/
129 if (idx >= IWL_RATE_9M_INDEX)
130 idx += 1;
131 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
132 return idx;
133
134 /* legacy rate format, search for match in table */
135 } else {
136 for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
137 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
138 return idx;
139 }
140
141 return -1;
142}
143EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
144
145u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
146{
147 int i;
148 u8 ind = ant;
149
150 if (priv->band == IEEE80211_BAND_2GHZ &&
151 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
152 return 0;
153
154 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
155 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
156 if (valid & BIT(ind))
157 return ind;
158 }
159 return ant;
160}
161EXPORT_SYMBOL(iwl_toggle_tx_ant);
162
163const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 75const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
164EXPORT_SYMBOL(iwl_bcast_addr); 76EXPORT_SYMBOL(iwl_bcast_addr);
165 77
@@ -196,6 +108,9 @@ static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
196 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 108 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
197 return; 109 return;
198 110
111 if (!ctx->is_active)
112 return;
113
199 ctx->qos_data.def_qos_parm.qos_flags = 0; 114 ctx->qos_data.def_qos_parm.qos_flags = 0;
200 115
201 if (ctx->qos_data.qos_active) 116 if (ctx->qos_data.qos_active)
@@ -229,7 +144,8 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
229 144
230 ht_info->ht_supported = true; 145 ht_info->ht_supported = true;
231 146
232 if (priv->cfg->ht_greenfield_support) 147 if (priv->cfg->ht_params &&
148 priv->cfg->ht_params->ht_greenfield_support)
233 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 149 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
234 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 150 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
235 max_bit_rate = MAX_BIT_RATE_20_MHZ; 151 max_bit_rate = MAX_BIT_RATE_20_MHZ;
@@ -244,11 +160,11 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
244 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 160 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
245 161
246 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 162 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
247 if (priv->cfg->ampdu_factor) 163 if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
248 ht_info->ampdu_factor = priv->cfg->ampdu_factor; 164 ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
249 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 165 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
250 if (priv->cfg->ampdu_density) 166 if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
251 ht_info->ampdu_density = priv->cfg->ampdu_density; 167 ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
252 168
253 ht_info->mcs.rx_mask[0] = 0xFF; 169 ht_info->mcs.rx_mask[0] = 0xFF;
254 if (rx_chains_num >= 2) 170 if (rx_chains_num >= 2)
@@ -435,12 +351,6 @@ void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
435EXPORT_SYMBOL(iwlcore_tx_cmd_protection); 351EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
436 352
437 353
438static bool is_single_rx_stream(struct iwl_priv *priv)
439{
440 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
441 priv->current_ht_config.single_chain_sufficient;
442}
443
444static bool iwl_is_channel_extension(struct iwl_priv *priv, 354static bool iwl_is_channel_extension(struct iwl_priv *priv,
445 enum ieee80211_band band, 355 enum ieee80211_band band,
446 u16 channel, u8 extension_chan_offset) 356 u16 channel, u8 extension_chan_offset)
@@ -488,8 +398,29 @@ EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
488 398
489static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) 399static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
490{ 400{
491 u16 new_val = 0; 401 u16 new_val;
492 u16 beacon_factor = 0; 402 u16 beacon_factor;
403
404 /*
405 * If mac80211 hasn't given us a beacon interval, program
406 * the default into the device (not checking this here
407 * would cause the adjustment below to return the maximum
408 * value, which may break PAN.)
409 */
410 if (!beacon_val)
411 return DEFAULT_BEACON_INTERVAL;
412
413 /*
414 * If the beacon interval we obtained from the peer
415 * is too large, we'll have to wake up more often
416 * (and in IBSS case, we'll beacon too much)
417 *
418 * For example, if max_beacon_val is 4096, and the
419 * requested beacon interval is 7000, we'll have to
420 * use 3500 to be able to wake up on the beacons.
421 *
422 * This could badly influence beacon detection stats.
423 */
493 424
494 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; 425 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
495 new_val = beacon_val / beacon_factor; 426 new_val = beacon_val / beacon_factor;
@@ -526,10 +457,22 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
526 ctx->timing.atim_window = 0; 457 ctx->timing.atim_window = 0;
527 458
528 if (ctx->ctxid == IWL_RXON_CTX_PAN && 459 if (ctx->ctxid == IWL_RXON_CTX_PAN &&
529 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION)) { 460 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
461 iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
462 priv->contexts[IWL_RXON_CTX_BSS].vif &&
463 priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
530 ctx->timing.beacon_interval = 464 ctx->timing.beacon_interval =
531 priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval; 465 priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
532 beacon_int = le16_to_cpu(ctx->timing.beacon_interval); 466 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
467 } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
468 iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
469 priv->contexts[IWL_RXON_CTX_PAN].vif &&
470 priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
471 (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
472 !ctx->vif->bss_conf.beacon_int)) {
473 ctx->timing.beacon_interval =
474 priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
475 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
533 } else { 476 } else {
534 beacon_int = iwl_adjust_beacon_interval(beacon_int, 477 beacon_int = iwl_adjust_beacon_interval(beacon_int,
535 priv->hw_params.max_beacon_itrvl * TIME_UNIT); 478 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
@@ -567,76 +510,74 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
567} 510}
568EXPORT_SYMBOL(iwl_set_rxon_hwcrypto); 511EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
569 512
570/** 513/* validate RXON structure is valid */
571 * iwl_check_rxon_cmd - validate RXON structure is valid
572 *
573 * NOTE: This is really only useful during development and can eventually
574 * be #ifdef'd out once the driver is stable and folks aren't actively
575 * making changes
576 */
577int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 514int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
578{ 515{
579 int error = 0;
580 int counter = 1;
581 struct iwl_rxon_cmd *rxon = &ctx->staging; 516 struct iwl_rxon_cmd *rxon = &ctx->staging;
517 bool error = false;
582 518
583 if (rxon->flags & RXON_FLG_BAND_24G_MSK) { 519 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
584 error |= le32_to_cpu(rxon->flags & 520 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
585 (RXON_FLG_TGJ_NARROW_BAND_MSK | 521 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
586 RXON_FLG_RADAR_DETECT_MSK)); 522 error = true;
587 if (error) 523 }
588 IWL_WARN(priv, "check 24G fields %d | %d\n", 524 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
589 counter++, error); 525 IWL_WARN(priv, "check 2.4G: wrong radar\n");
526 error = true;
527 }
590 } else { 528 } else {
591 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? 529 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
592 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); 530 IWL_WARN(priv, "check 5.2G: not short slot!\n");
593 if (error) 531 error = true;
594 IWL_WARN(priv, "check 52 fields %d | %d\n", 532 }
595 counter++, error); 533 if (rxon->flags & RXON_FLG_CCK_MSK) {
596 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); 534 IWL_WARN(priv, "check 5.2G: CCK!\n");
597 if (error) 535 error = true;
598 IWL_WARN(priv, "check 52 CCK %d | %d\n", 536 }
599 counter++, error); 537 }
600 } 538 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
601 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; 539 IWL_WARN(priv, "mac/bssid mcast!\n");
602 if (error) 540 error = true;
603 IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error); 541 }
604 542
605 /* make sure basic rates 6Mbps and 1Mbps are supported */ 543 /* make sure basic rates 6Mbps and 1Mbps are supported */
606 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && 544 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
607 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); 545 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
608 if (error) 546 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
609 IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error); 547 error = true;
548 }
610 549
611 error |= (le16_to_cpu(rxon->assoc_id) > 2007); 550 if (le16_to_cpu(rxon->assoc_id) > 2007) {
612 if (error) 551 IWL_WARN(priv, "aid > 2007\n");
613 IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error); 552 error = true;
553 }
614 554
615 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) 555 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
616 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); 556 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
617 if (error) 557 IWL_WARN(priv, "CCK and short slot\n");
618 IWL_WARN(priv, "check CCK and short slot %d | %d\n", 558 error = true;
619 counter++, error); 559 }
620 560
621 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) 561 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
622 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); 562 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
623 if (error) 563 IWL_WARN(priv, "CCK and auto detect");
624 IWL_WARN(priv, "check CCK & auto detect %d | %d\n", 564 error = true;
625 counter++, error); 565 }
626 566
627 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | 567 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
628 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); 568 RXON_FLG_TGG_PROTECT_MSK)) ==
629 if (error) 569 RXON_FLG_TGG_PROTECT_MSK) {
630 IWL_WARN(priv, "check TGG and auto detect %d | %d\n", 570 IWL_WARN(priv, "TGg but no auto-detect\n");
631 counter++, error); 571 error = true;
572 }
632 573
633 if (error) 574 if (error)
634 IWL_WARN(priv, "Tuning to channel %d\n", 575 IWL_WARN(priv, "Tuning to channel %d\n",
635 le16_to_cpu(rxon->channel)); 576 le16_to_cpu(rxon->channel));
636 577
637 if (error) { 578 if (error) {
638 IWL_ERR(priv, "Not a valid iwl_rxon_assoc_cmd field values\n"); 579 IWL_ERR(priv, "Invalid RXON\n");
639 return -1; 580 return -EINVAL;
640 } 581 }
641 return 0; 582 return 0;
642} 583}
@@ -797,137 +738,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
797} 738}
798EXPORT_SYMBOL(iwl_set_rxon_ht); 739EXPORT_SYMBOL(iwl_set_rxon_ht);
799 740
800#define IWL_NUM_RX_CHAINS_MULTIPLE 3
801#define IWL_NUM_RX_CHAINS_SINGLE 2
802#define IWL_NUM_IDLE_CHAINS_DUAL 2
803#define IWL_NUM_IDLE_CHAINS_SINGLE 1
804
805/*
806 * Determine how many receiver/antenna chains to use.
807 *
808 * More provides better reception via diversity. Fewer saves power
809 * at the expense of throughput, but only when not in powersave to
810 * start with.
811 *
812 * MIMO (dual stream) requires at least 2, but works better with 3.
813 * This does not determine *which* chains to use, just how many.
814 */
815static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
816{
817 if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
818 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
819 /*
820 * only use chain 'A' in bt high traffic load or
821 * full concurrency mode
822 */
823 return IWL_NUM_RX_CHAINS_SINGLE;
824 }
825 /* # of Rx chains to use when expecting MIMO. */
826 if (is_single_rx_stream(priv))
827 return IWL_NUM_RX_CHAINS_SINGLE;
828 else
829 return IWL_NUM_RX_CHAINS_MULTIPLE;
830}
831
832/*
833 * When we are in power saving mode, unless device support spatial
834 * multiplexing power save, use the active count for rx chain count.
835 */
836static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
837{
838 /* # Rx chains when idling, depending on SMPS mode */
839 switch (priv->current_ht_config.smps) {
840 case IEEE80211_SMPS_STATIC:
841 case IEEE80211_SMPS_DYNAMIC:
842 return IWL_NUM_IDLE_CHAINS_SINGLE;
843 case IEEE80211_SMPS_OFF:
844 return active_cnt;
845 default:
846 WARN(1, "invalid SMPS mode %d",
847 priv->current_ht_config.smps);
848 return active_cnt;
849 }
850}
851
852/* up to 4 chains */
853static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
854{
855 u8 res;
856 res = (chain_bitmap & BIT(0)) >> 0;
857 res += (chain_bitmap & BIT(1)) >> 1;
858 res += (chain_bitmap & BIT(2)) >> 2;
859 res += (chain_bitmap & BIT(3)) >> 3;
860 return res;
861}
862
863/**
864 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
865 *
866 * Selects how many and which Rx receivers/antennas/chains to use.
867 * This should not be used for scan command ... it puts data in wrong place.
868 */
869void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
870{
871 bool is_single = is_single_rx_stream(priv);
872 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
873 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
874 u32 active_chains;
875 u16 rx_chain;
876
877 /* Tell uCode which antennas are actually connected.
878 * Before first association, we assume all antennas are connected.
879 * Just after first association, iwl_chain_noise_calibration()
880 * checks which antennas actually *are* connected. */
881 if (priv->chain_noise_data.active_chains)
882 active_chains = priv->chain_noise_data.active_chains;
883 else
884 active_chains = priv->hw_params.valid_rx_ant;
885
886 if (priv->cfg->advanced_bt_coexist && (priv->bt_full_concurrent ||
887 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
888 /*
889 * only use chain 'A' in bt high traffic load or
890 * full concurrency mode
891 */
892 active_chains = first_antenna(active_chains);
893 }
894
895 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
896
897 /* How many receivers should we use? */
898 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
899 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
900
901
902 /* correct rx chain count according hw settings
903 * and chain noise calibration
904 */
905 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
906 if (valid_rx_cnt < active_rx_cnt)
907 active_rx_cnt = valid_rx_cnt;
908
909 if (valid_rx_cnt < idle_rx_cnt)
910 idle_rx_cnt = valid_rx_cnt;
911
912 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
913 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
914
915 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
916
917 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
918 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
919 else
920 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
921
922 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
923 ctx->staging.rx_chain,
924 active_rx_cnt, idle_rx_cnt);
925
926 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
927 active_rx_cnt < idle_rx_cnt);
928}
929EXPORT_SYMBOL(iwl_set_rxon_chain);
930
931/* Return valid, unused, channel for a passive scan to reset the RF */ 741/* Return valid, unused, channel for a passive scan to reset the RF */
932u8 iwl_get_single_channel_number(struct iwl_priv *priv, 742u8 iwl_get_single_channel_number(struct iwl_priv *priv,
933 enum ieee80211_band band) 743 enum ieee80211_band band)
@@ -1326,7 +1136,7 @@ int iwl_apm_init(struct iwl_priv *priv)
1326 * If not (unlikely), enable L0S, so there is at least some 1136 * If not (unlikely), enable L0S, so there is at least some
1327 * power savings, even without L1. 1137 * power savings, even without L1.
1328 */ 1138 */
1329 if (priv->cfg->set_l0s) { 1139 if (priv->cfg->base_params->set_l0s) {
1330 lctl = iwl_pcie_link_ctl(priv); 1140 lctl = iwl_pcie_link_ctl(priv);
1331 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == 1141 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1332 PCI_CFG_LINK_CTRL_VAL_L1_EN) { 1142 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
@@ -1343,8 +1153,9 @@ int iwl_apm_init(struct iwl_priv *priv)
1343 } 1153 }
1344 1154
1345 /* Configure analog phase-lock-loop before activating to D0A */ 1155 /* Configure analog phase-lock-loop before activating to D0A */
1346 if (priv->cfg->pll_cfg_val) 1156 if (priv->cfg->base_params->pll_cfg_val)
1347 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val); 1157 iwl_set_bit(priv, CSR_ANA_PLL_CFG,
1158 priv->cfg->base_params->pll_cfg_val);
1348 1159
1349 /* 1160 /*
1350 * Set "initialization complete" bit to move adapter from 1161 * Set "initialization complete" bit to move adapter from
@@ -1375,7 +1186,7 @@ int iwl_apm_init(struct iwl_priv *priv)
1375 * do not disable clocks. This preserves any hardware bits already 1186 * do not disable clocks. This preserves any hardware bits already
1376 * set by default in "CLK_CTRL_REG" after reset. 1187 * set by default in "CLK_CTRL_REG" after reset.
1377 */ 1188 */
1378 if (priv->cfg->use_bsm) 1189 if (priv->cfg->base_params->use_bsm)
1379 iwl_write_prph(priv, APMG_CLK_EN_REG, 1190 iwl_write_prph(priv, APMG_CLK_EN_REG,
1380 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); 1191 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1381 else 1192 else
@@ -1716,43 +1527,47 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv,
1716 iwlcore_commit_rxon(priv, ctx); 1527 iwlcore_commit_rxon(priv, ctx);
1717} 1528}
1718 1529
1719static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 1530static void iwlcore_beacon_update(struct ieee80211_hw *hw,
1531 struct ieee80211_vif *vif)
1720{ 1532{
1721 struct iwl_priv *priv = hw->priv; 1533 struct iwl_priv *priv = hw->priv;
1722 unsigned long flags; 1534 unsigned long flags;
1723 __le64 timestamp; 1535 __le64 timestamp;
1536 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1724 1537
1725 IWL_DEBUG_MAC80211(priv, "enter\n"); 1538 if (!skb)
1539 return;
1540
1541 IWL_DEBUG_ASSOC(priv, "enter\n");
1726 1542
1727 lockdep_assert_held(&priv->mutex); 1543 lockdep_assert_held(&priv->mutex);
1728 1544
1729 if (!priv->beacon_ctx) { 1545 if (!priv->beacon_ctx) {
1730 IWL_ERR(priv, "update beacon but no beacon context!\n"); 1546 IWL_ERR(priv, "update beacon but no beacon context!\n");
1731 dev_kfree_skb(skb); 1547 dev_kfree_skb(skb);
1732 return -EINVAL; 1548 return;
1733 }
1734
1735 if (!iwl_is_ready_rf(priv)) {
1736 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1737 return -EIO;
1738 } 1549 }
1739 1550
1740 spin_lock_irqsave(&priv->lock, flags); 1551 spin_lock_irqsave(&priv->lock, flags);
1741 1552
1742 if (priv->ibss_beacon) 1553 if (priv->beacon_skb)
1743 dev_kfree_skb(priv->ibss_beacon); 1554 dev_kfree_skb(priv->beacon_skb);
1744 1555
1745 priv->ibss_beacon = skb; 1556 priv->beacon_skb = skb;
1746 1557
1747 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 1558 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
1748 priv->timestamp = le64_to_cpu(timestamp); 1559 priv->timestamp = le64_to_cpu(timestamp);
1749 1560
1750 IWL_DEBUG_MAC80211(priv, "leave\n"); 1561 IWL_DEBUG_ASSOC(priv, "leave\n");
1562
1751 spin_unlock_irqrestore(&priv->lock, flags); 1563 spin_unlock_irqrestore(&priv->lock, flags);
1752 1564
1753 priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif); 1565 if (!iwl_is_ready_rf(priv)) {
1566 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1567 return;
1568 }
1754 1569
1755 return 0; 1570 priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
1756} 1571}
1757 1572
1758void iwl_bss_info_changed(struct ieee80211_hw *hw, 1573void iwl_bss_info_changed(struct ieee80211_hw *hw,
@@ -1793,13 +1608,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
1793 } 1608 }
1794 1609
1795 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) { 1610 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
1796 dev_kfree_skb(priv->ibss_beacon); 1611 dev_kfree_skb(priv->beacon_skb);
1797 priv->ibss_beacon = ieee80211_beacon_get(hw, vif); 1612 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
1798 } 1613 }
1799 1614
1800 if (changes & BSS_CHANGED_BEACON_INT) { 1615 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
1801 /* TODO: in AP mode, do something to make this take effect */ 1616 iwl_send_rxon_timing(priv, ctx);
1802 }
1803 1617
1804 if (changes & BSS_CHANGED_BSSID) { 1618 if (changes & BSS_CHANGED_BSSID) {
1805 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); 1619 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
@@ -1835,13 +1649,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
1835 * mac80211 decides to do both changes at once because 1649 * mac80211 decides to do both changes at once because
1836 * it will invoke post_associate. 1650 * it will invoke post_associate.
1837 */ 1651 */
1838 if (vif->type == NL80211_IFTYPE_ADHOC && 1652 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
1839 changes & BSS_CHANGED_BEACON) { 1653 iwlcore_beacon_update(hw, vif);
1840 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1841
1842 if (beacon)
1843 iwl_mac_beacon_update(hw, beacon);
1844 }
1845 1654
1846 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 1655 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
1847 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", 1656 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
@@ -1918,6 +1727,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
1918 memcpy(ctx->staging.bssid_addr, 1727 memcpy(ctx->staging.bssid_addr,
1919 bss_conf->bssid, ETH_ALEN); 1728 bss_conf->bssid, ETH_ALEN);
1920 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); 1729 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1730 iwl_led_associate(priv);
1921 iwlcore_config_ap(priv, vif); 1731 iwlcore_config_ap(priv, vif);
1922 } else 1732 } else
1923 iwl_set_no_assoc(priv, vif); 1733 iwl_set_no_assoc(priv, vif);
@@ -1968,7 +1778,8 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1968 1778
1969 mutex_lock(&priv->mutex); 1779 mutex_lock(&priv->mutex);
1970 1780
1971 if (WARN_ON(!iwl_is_ready_rf(priv))) { 1781 if (!iwl_is_ready_rf(priv)) {
1782 IWL_WARN(priv, "Try to add interface when device not ready\n");
1972 err = -EINVAL; 1783 err = -EINVAL;
1973 goto out; 1784 goto out;
1974 } 1785 }
@@ -2009,11 +1820,17 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2009 */ 1820 */
2010 priv->iw_mode = vif->type; 1821 priv->iw_mode = vif->type;
2011 1822
1823 ctx->is_active = true;
1824
2012 err = iwl_set_mode(priv, vif); 1825 err = iwl_set_mode(priv, vif);
2013 if (err) 1826 if (err) {
1827 if (!ctx->always_active)
1828 ctx->is_active = false;
2014 goto out_err; 1829 goto out_err;
1830 }
2015 1831
2016 if (priv->cfg->advanced_bt_coexist && 1832 if (priv->cfg->bt_params &&
1833 priv->cfg->bt_params->advanced_bt_coexist &&
2017 vif->type == NL80211_IFTYPE_ADHOC) { 1834 vif->type == NL80211_IFTYPE_ADHOC) {
2018 /* 1835 /*
2019 * pretend to have high BT traffic as long as we 1836 * pretend to have high BT traffic as long as we
@@ -2041,7 +1858,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2041{ 1858{
2042 struct iwl_priv *priv = hw->priv; 1859 struct iwl_priv *priv = hw->priv;
2043 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1860 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
2044 bool scan_completed = false;
2045 1861
2046 IWL_DEBUG_MAC80211(priv, "enter\n"); 1862 IWL_DEBUG_MAC80211(priv, "enter\n");
2047 1863
@@ -2050,14 +1866,14 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2050 WARN_ON(ctx->vif != vif); 1866 WARN_ON(ctx->vif != vif);
2051 ctx->vif = NULL; 1867 ctx->vif = NULL;
2052 1868
2053 iwl_scan_cancel_timeout(priv, 100);
2054 iwl_set_mode(priv, vif);
2055
2056 if (priv->scan_vif == vif) { 1869 if (priv->scan_vif == vif) {
2057 scan_completed = true; 1870 iwl_scan_cancel_timeout(priv, 200);
2058 priv->scan_vif = NULL; 1871 iwl_force_scan_end(priv);
2059 priv->scan_request = NULL;
2060 } 1872 }
1873 iwl_set_mode(priv, vif);
1874
1875 if (!ctx->always_active)
1876 ctx->is_active = false;
2061 1877
2062 /* 1878 /*
2063 * When removing the IBSS interface, overwrite the 1879 * When removing the IBSS interface, overwrite the
@@ -2072,9 +1888,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2072 memset(priv->bssid, 0, ETH_ALEN); 1888 memset(priv->bssid, 0, ETH_ALEN);
2073 mutex_unlock(&priv->mutex); 1889 mutex_unlock(&priv->mutex);
2074 1890
2075 if (scan_completed)
2076 ieee80211_scan_completed(priv->hw, true);
2077
2078 IWL_DEBUG_MAC80211(priv, "leave\n"); 1891 IWL_DEBUG_MAC80211(priv, "leave\n");
2079 1892
2080} 1893}
@@ -2246,15 +2059,16 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2246 spin_lock_irqsave(&priv->lock, flags); 2059 spin_lock_irqsave(&priv->lock, flags);
2247 2060
2248 /* new association get rid of ibss beacon skb */ 2061 /* new association get rid of ibss beacon skb */
2249 if (priv->ibss_beacon) 2062 if (priv->beacon_skb)
2250 dev_kfree_skb(priv->ibss_beacon); 2063 dev_kfree_skb(priv->beacon_skb);
2251 2064
2252 priv->ibss_beacon = NULL; 2065 priv->beacon_skb = NULL;
2253 2066
2254 priv->timestamp = 0; 2067 priv->timestamp = 0;
2255 2068
2256 spin_unlock_irqrestore(&priv->lock, flags); 2069 spin_unlock_irqrestore(&priv->lock, flags);
2257 2070
2071 iwl_scan_cancel_timeout(priv, 100);
2258 if (!iwl_is_ready_rf(priv)) { 2072 if (!iwl_is_ready_rf(priv)) {
2259 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2073 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2260 mutex_unlock(&priv->mutex); 2074 mutex_unlock(&priv->mutex);
@@ -2264,7 +2078,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2264 /* we are restarting association process 2078 /* we are restarting association process
2265 * clear RXON_FILTER_ASSOC_MSK bit 2079 * clear RXON_FILTER_ASSOC_MSK bit
2266 */ 2080 */
2267 iwl_scan_cancel_timeout(priv, 100);
2268 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2081 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2269 iwlcore_commit_rxon(priv, ctx); 2082 iwlcore_commit_rxon(priv, ctx);
2270 2083
@@ -2280,7 +2093,8 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
2280{ 2093{
2281 if (!priv->txq) 2094 if (!priv->txq)
2282 priv->txq = kzalloc( 2095 priv->txq = kzalloc(
2283 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues, 2096 sizeof(struct iwl_tx_queue) *
2097 priv->cfg->base_params->num_of_queues,
2284 GFP_KERNEL); 2098 GFP_KERNEL);
2285 if (!priv->txq) { 2099 if (!priv->txq) {
2286 IWL_ERR(priv, "Not enough memory for txq\n"); 2100 IWL_ERR(priv, "Not enough memory for txq\n");
@@ -2536,140 +2350,6 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
2536EXPORT_SYMBOL(iwl_update_stats); 2350EXPORT_SYMBOL(iwl_update_stats);
2537#endif 2351#endif
2538 2352
2539static const char *get_csr_string(int cmd)
2540{
2541 switch (cmd) {
2542 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2543 IWL_CMD(CSR_INT_COALESCING);
2544 IWL_CMD(CSR_INT);
2545 IWL_CMD(CSR_INT_MASK);
2546 IWL_CMD(CSR_FH_INT_STATUS);
2547 IWL_CMD(CSR_GPIO_IN);
2548 IWL_CMD(CSR_RESET);
2549 IWL_CMD(CSR_GP_CNTRL);
2550 IWL_CMD(CSR_HW_REV);
2551 IWL_CMD(CSR_EEPROM_REG);
2552 IWL_CMD(CSR_EEPROM_GP);
2553 IWL_CMD(CSR_OTP_GP_REG);
2554 IWL_CMD(CSR_GIO_REG);
2555 IWL_CMD(CSR_GP_UCODE_REG);
2556 IWL_CMD(CSR_GP_DRIVER_REG);
2557 IWL_CMD(CSR_UCODE_DRV_GP1);
2558 IWL_CMD(CSR_UCODE_DRV_GP2);
2559 IWL_CMD(CSR_LED_REG);
2560 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2561 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2562 IWL_CMD(CSR_ANA_PLL_CFG);
2563 IWL_CMD(CSR_HW_REV_WA_REG);
2564 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2565 default:
2566 return "UNKNOWN";
2567
2568 }
2569}
2570
2571void iwl_dump_csr(struct iwl_priv *priv)
2572{
2573 int i;
2574 u32 csr_tbl[] = {
2575 CSR_HW_IF_CONFIG_REG,
2576 CSR_INT_COALESCING,
2577 CSR_INT,
2578 CSR_INT_MASK,
2579 CSR_FH_INT_STATUS,
2580 CSR_GPIO_IN,
2581 CSR_RESET,
2582 CSR_GP_CNTRL,
2583 CSR_HW_REV,
2584 CSR_EEPROM_REG,
2585 CSR_EEPROM_GP,
2586 CSR_OTP_GP_REG,
2587 CSR_GIO_REG,
2588 CSR_GP_UCODE_REG,
2589 CSR_GP_DRIVER_REG,
2590 CSR_UCODE_DRV_GP1,
2591 CSR_UCODE_DRV_GP2,
2592 CSR_LED_REG,
2593 CSR_DRAM_INT_TBL_REG,
2594 CSR_GIO_CHICKEN_BITS,
2595 CSR_ANA_PLL_CFG,
2596 CSR_HW_REV_WA_REG,
2597 CSR_DBG_HPET_MEM_REG
2598 };
2599 IWL_ERR(priv, "CSR values:\n");
2600 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
2601 "CSR_INT_PERIODIC_REG)\n");
2602 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2603 IWL_ERR(priv, " %25s: 0X%08x\n",
2604 get_csr_string(csr_tbl[i]),
2605 iwl_read32(priv, csr_tbl[i]));
2606 }
2607}
2608EXPORT_SYMBOL(iwl_dump_csr);
2609
2610static const char *get_fh_string(int cmd)
2611{
2612 switch (cmd) {
2613 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
2614 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
2615 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
2616 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
2617 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
2618 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
2619 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
2620 IWL_CMD(FH_TSSR_TX_STATUS_REG);
2621 IWL_CMD(FH_TSSR_TX_ERROR_REG);
2622 default:
2623 return "UNKNOWN";
2624
2625 }
2626}
2627
2628int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2629{
2630 int i;
2631#ifdef CONFIG_IWLWIFI_DEBUG
2632 int pos = 0;
2633 size_t bufsz = 0;
2634#endif
2635 u32 fh_tbl[] = {
2636 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2637 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2638 FH_RSCSR_CHNL0_WPTR,
2639 FH_MEM_RCSR_CHNL0_CONFIG_REG,
2640 FH_MEM_RSSR_SHARED_CTRL_REG,
2641 FH_MEM_RSSR_RX_STATUS_REG,
2642 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
2643 FH_TSSR_TX_STATUS_REG,
2644 FH_TSSR_TX_ERROR_REG
2645 };
2646#ifdef CONFIG_IWLWIFI_DEBUG
2647 if (display) {
2648 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
2649 *buf = kmalloc(bufsz, GFP_KERNEL);
2650 if (!*buf)
2651 return -ENOMEM;
2652 pos += scnprintf(*buf + pos, bufsz - pos,
2653 "FH register values:\n");
2654 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2655 pos += scnprintf(*buf + pos, bufsz - pos,
2656 " %34s: 0X%08x\n",
2657 get_fh_string(fh_tbl[i]),
2658 iwl_read_direct32(priv, fh_tbl[i]));
2659 }
2660 return pos;
2661 }
2662#endif
2663 IWL_ERR(priv, "FH register values:\n");
2664 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2665 IWL_ERR(priv, " %34s: 0X%08x\n",
2666 get_fh_string(fh_tbl[i]),
2667 iwl_read_direct32(priv, fh_tbl[i]));
2668 }
2669 return 0;
2670}
2671EXPORT_SYMBOL(iwl_dump_fh);
2672
2673static void iwl_force_rf_reset(struct iwl_priv *priv) 2353static void iwl_force_rf_reset(struct iwl_priv *priv)
2674{ 2354{
2675 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2355 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -2750,7 +2430,6 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2750 } 2430 }
2751 return 0; 2431 return 0;
2752} 2432}
2753EXPORT_SYMBOL(iwl_force_reset);
2754 2433
2755/** 2434/**
2756 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover 2435 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
@@ -2786,33 +2465,31 @@ static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2786 txq = &priv->txq[cnt]; 2465 txq = &priv->txq[cnt];
2787 q = &txq->q; 2466 q = &txq->q;
2788 /* queue is empty, skip */ 2467 /* queue is empty, skip */
2789 if (q->read_ptr != q->write_ptr) { 2468 if (q->read_ptr == q->write_ptr)
2790 if (q->read_ptr == q->last_read_ptr) { 2469 return 0;
2791 /* a queue has not been read from last time */ 2470
2792 if (q->repeat_same_read_ptr > MAX_REPEAT) { 2471 if (q->read_ptr == q->last_read_ptr) {
2793 IWL_ERR(priv, 2472 /* a queue has not been read from last time */
2794 "queue %d stuck %d time. Fw reload.\n", 2473 if (q->repeat_same_read_ptr > MAX_REPEAT) {
2795 q->id, q->repeat_same_read_ptr); 2474 IWL_ERR(priv,
2796 q->repeat_same_read_ptr = 0; 2475 "queue %d stuck %d time. Fw reload.\n",
2797 iwl_force_reset(priv, IWL_FW_RESET, false); 2476 q->id, q->repeat_same_read_ptr);
2798 } else {
2799 q->repeat_same_read_ptr++;
2800 IWL_DEBUG_RADIO(priv,
2801 "queue %d, not read %d time\n",
2802 q->id,
2803 q->repeat_same_read_ptr);
2804 if (!priv->cfg->advanced_bt_coexist) {
2805 mod_timer(&priv->monitor_recover,
2806 jiffies + msecs_to_jiffies(
2807 IWL_ONE_HUNDRED_MSECS));
2808 return 1;
2809 }
2810 }
2811 return 0;
2812 } else {
2813 q->last_read_ptr = q->read_ptr;
2814 q->repeat_same_read_ptr = 0; 2477 q->repeat_same_read_ptr = 0;
2478 iwl_force_reset(priv, IWL_FW_RESET, false);
2479 } else {
2480 q->repeat_same_read_ptr++;
2481 IWL_DEBUG_RADIO(priv,
2482 "queue %d, not read %d time\n",
2483 q->id,
2484 q->repeat_same_read_ptr);
2485 mod_timer(&priv->monitor_recover,
2486 jiffies + msecs_to_jiffies(
2487 IWL_ONE_HUNDRED_MSECS));
2488 return 1;
2815 } 2489 }
2490 } else {
2491 q->last_read_ptr = q->read_ptr;
2492 q->repeat_same_read_ptr = 0;
2816 } 2493 }
2817 return 0; 2494 return 0;
2818} 2495}
@@ -2839,13 +2516,13 @@ void iwl_bg_monitor_recover(unsigned long data)
2839 return; 2516 return;
2840 } 2517 }
2841 } 2518 }
2842 if (priv->cfg->monitor_recover_period) { 2519 if (priv->cfg->base_params->monitor_recover_period) {
2843 /* 2520 /*
2844 * Reschedule the timer to occur in 2521 * Reschedule the timer to occur in
2845 * priv->cfg->monitor_recover_period 2522 * priv->cfg->base_params->monitor_recover_period
2846 */ 2523 */
2847 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies( 2524 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
2848 priv->cfg->monitor_recover_period)); 2525 priv->cfg->base_params->monitor_recover_period));
2849 } 2526 }
2850} 2527}
2851EXPORT_SYMBOL(iwl_bg_monitor_recover); 2528EXPORT_SYMBOL(iwl_bg_monitor_recover);
@@ -2918,7 +2595,7 @@ int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2918 * it will not call apm_ops.stop() to stop the DMA operation. 2595 * it will not call apm_ops.stop() to stop the DMA operation.
2919 * Calling apm_ops.stop here to make sure we stop the DMA. 2596 * Calling apm_ops.stop here to make sure we stop the DMA.
2920 */ 2597 */
2921 priv->cfg->ops->lib->apm_ops.stop(priv); 2598 iwl_apm_stop(priv);
2922 2599
2923 pci_save_state(pdev); 2600 pci_save_state(pdev);
2924 pci_disable_device(pdev); 2601 pci_disable_device(pdev);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index f7b57ed84f66..64527def059f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -111,14 +111,13 @@ struct iwl_hcmd_utils_ops {
111 __le16 fc, __le32 *tx_flags); 111 __le16 fc, __le32 *tx_flags);
112 int (*calc_rssi)(struct iwl_priv *priv, 112 int (*calc_rssi)(struct iwl_priv *priv,
113 struct iwl_rx_phy_res *rx_resp); 113 struct iwl_rx_phy_res *rx_resp);
114 void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); 114 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
115 void (*post_scan)(struct iwl_priv *priv);
115}; 116};
116 117
117struct iwl_apm_ops { 118struct iwl_apm_ops {
118 int (*init)(struct iwl_priv *priv); 119 int (*init)(struct iwl_priv *priv);
119 void (*stop)(struct iwl_priv *priv);
120 void (*config)(struct iwl_priv *priv); 120 void (*config)(struct iwl_priv *priv);
121 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
122}; 121};
123 122
124struct iwl_debugfs_ops { 123struct iwl_debugfs_ops {
@@ -130,12 +129,12 @@ struct iwl_debugfs_ops {
130 size_t count, loff_t *ppos); 129 size_t count, loff_t *ppos);
131 ssize_t (*bt_stats_read)(struct file *file, char __user *user_buf, 130 ssize_t (*bt_stats_read)(struct file *file, char __user *user_buf,
132 size_t count, loff_t *ppos); 131 size_t count, loff_t *ppos);
132 ssize_t (*reply_tx_error)(struct file *file, char __user *user_buf,
133 size_t count, loff_t *ppos);
133}; 134};
134 135
135struct iwl_temp_ops { 136struct iwl_temp_ops {
136 void (*temperature)(struct iwl_priv *priv); 137 void (*temperature)(struct iwl_priv *priv);
137 void (*set_ct_kill)(struct iwl_priv *priv);
138 void (*set_calib_version)(struct iwl_priv *priv);
139}; 138};
140 139
141struct iwl_tt_ops { 140struct iwl_tt_ops {
@@ -231,11 +230,17 @@ struct iwl_led_ops {
231 int (*off)(struct iwl_priv *priv); 230 int (*off)(struct iwl_priv *priv);
232}; 231};
233 232
233/* NIC specific ops */
234struct iwl_nic_ops {
235 void (*additional_nic_config)(struct iwl_priv *priv);
236};
237
234struct iwl_ops { 238struct iwl_ops {
235 const struct iwl_lib_ops *lib; 239 const struct iwl_lib_ops *lib;
236 const struct iwl_hcmd_ops *hcmd; 240 const struct iwl_hcmd_ops *hcmd;
237 const struct iwl_hcmd_utils_ops *utils; 241 const struct iwl_hcmd_utils_ops *utils;
238 const struct iwl_led_ops *led; 242 const struct iwl_led_ops *led;
243 const struct iwl_nic_ops *nic;
239}; 244};
240 245
241struct iwl_mod_params { 246struct iwl_mod_params {
@@ -248,20 +253,12 @@ struct iwl_mod_params {
248 int restart_fw; /* def: 1 = restart firmware */ 253 int restart_fw; /* def: 1 = restart firmware */
249}; 254};
250 255
251/** 256/*
252 * struct iwl_cfg
253 * @fw_name_pre: Firmware filename prefix. The api version and extension
254 * (.ucode) will be added to filename before loading from disk. The
255 * filename is constructed as fw_name_pre<api>.ucode.
256 * @ucode_api_max: Highest version of uCode API supported by driver.
257 * @ucode_api_min: Lowest version of uCode API supported by driver.
258 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
259 * @max_ll_items: max number of OTP blocks 257 * @max_ll_items: max number of OTP blocks
260 * @shadow_ram_support: shadow support for OTP memory 258 * @shadow_ram_support: shadow support for OTP memory
261 * @led_compensation: compensate on the led on/off time per HW according 259 * @led_compensation: compensate on the led on/off time per HW according
262 * to the deviation to achieve the desired led frequency. 260 * to the deviation to achieve the desired led frequency.
263 * The detail algorithm is described in iwl-led.c 261 * The detail algorithm is described in iwl-led.c
264 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
265 * @chain_noise_num_beacons: number of beacons used to compute chain noise 262 * @chain_noise_num_beacons: number of beacons used to compute chain noise
266 * @adv_thermal_throttle: support advance thermal throttle 263 * @adv_thermal_throttle: support advance thermal throttle
267 * @support_ct_kill_exit: support ct kill exit condition 264 * @support_ct_kill_exit: support ct kill exit condition
@@ -279,15 +276,74 @@ struct iwl_mod_params {
279 * sensitivity calibration operation 276 * sensitivity calibration operation
280 * @chain_noise_calib_by_driver: driver has the capability to perform 277 * @chain_noise_calib_by_driver: driver has the capability to perform
281 * chain noise calibration operation 278 * chain noise calibration operation
282 * @scan_antennas: available antenna for scan operation 279*/
280struct iwl_base_params {
281 int eeprom_size;
282 int num_of_queues; /* def: HW dependent */
283 int num_of_ampdu_queues;/* def: HW dependent */
284 /* for iwl_apm_init() */
285 u32 pll_cfg_val;
286 bool set_l0s;
287 bool use_bsm;
288
289 bool use_isr_legacy;
290 const u16 max_ll_items;
291 const bool shadow_ram_support;
292 u16 led_compensation;
293 const bool broken_powersave;
294 int chain_noise_num_beacons;
295 const bool supports_idle;
296 bool adv_thermal_throttle;
297 bool support_ct_kill_exit;
298 const bool support_wimax_coexist;
299 u8 plcp_delta_threshold;
300 s32 chain_noise_scale;
301 /* timer period for monitor the driver queues */
302 u32 monitor_recover_period;
303 bool temperature_kelvin;
304 u32 max_event_log_size;
305 const bool tx_power_by_driver;
306 const bool ucode_tracing;
307 const bool sensitivity_calib_by_driver;
308 const bool chain_noise_calib_by_driver;
309};
310/*
283 * @advanced_bt_coexist: support advanced bt coexist 311 * @advanced_bt_coexist: support advanced bt coexist
284 * @bt_init_traffic_load: specify initial bt traffic load 312 * @bt_init_traffic_load: specify initial bt traffic load
285 * @bt_prio_boost: default bt priority boost value 313 * @bt_prio_boost: default bt priority boost value
286 * @need_dc_calib: need to perform init dc calibration
287 * @bt_statistics: use BT version of statistics notification 314 * @bt_statistics: use BT version of statistics notification
288 * @agg_time_limit: maximum number of uSec in aggregation 315 * @agg_time_limit: maximum number of uSec in aggregation
289 * @ampdu_factor: Maximum A-MPDU length factor 316 * @ampdu_factor: Maximum A-MPDU length factor
290 * @ampdu_density: Minimum A-MPDU spacing 317 * @ampdu_density: Minimum A-MPDU spacing
318*/
319struct iwl_bt_params {
320 bool advanced_bt_coexist;
321 u8 bt_init_traffic_load;
322 u8 bt_prio_boost;
323 const bool bt_statistics;
324 u16 agg_time_limit;
325 u8 ampdu_factor;
326 u8 ampdu_density;
327};
328/*
329 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
330*/
331struct iwl_ht_params {
332 const bool ht_greenfield_support; /* if used set to true */
333 bool use_rts_for_aggregation;
334};
335
336/**
337 * struct iwl_cfg
338 * @fw_name_pre: Firmware filename prefix. The api version and extension
339 * (.ucode) will be added to filename before loading from disk. The
340 * filename is constructed as fw_name_pre<api>.ucode.
341 * @ucode_api_max: Highest version of uCode API supported by driver.
342 * @ucode_api_min: Lowest version of uCode API supported by driver.
343 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
344 * @need_dc_calib: need to perform init dc calibration
345 * @need_temp_offset_calib: need to perform temperature offset calibration
346 * @scan_antennas: available antenna for scan operation
291 * 347 *
292 * We enable the driver to be backward compatible wrt API version. The 348 * We enable the driver to be backward compatible wrt API version. The
293 * driver specifies which APIs it supports (with @ucode_api_max being the 349 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -298,9 +354,9 @@ struct iwl_mod_params {
298 * 354 *
299 * For example, 355 * For example,
300 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) { 356 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
301 * Driver interacts with Firmware API version >= 2. 357 * Driver interacts with Firmware API version >= 2.
302 * } else { 358 * } else {
303 * Driver interacts with Firmware API version 1. 359 * Driver interacts with Firmware API version 1.
304 * } 360 * }
305 * 361 *
306 * The ideal usage of this infrastructure is to treat a new ucode API 362 * The ideal usage of this infrastructure is to treat a new ucode API
@@ -311,59 +367,29 @@ struct iwl_mod_params {
311 * 367 *
312 */ 368 */
313struct iwl_cfg { 369struct iwl_cfg {
370 /* params specific to an individual device within a device family */
314 const char *name; 371 const char *name;
315 const char *fw_name_pre; 372 const char *fw_name_pre;
316 const unsigned int ucode_api_max; 373 const unsigned int ucode_api_max;
317 const unsigned int ucode_api_min; 374 const unsigned int ucode_api_min;
375 u8 valid_tx_ant;
376 u8 valid_rx_ant;
318 unsigned int sku; 377 unsigned int sku;
319 int eeprom_size;
320 u16 eeprom_ver; 378 u16 eeprom_ver;
321 u16 eeprom_calib_ver; 379 u16 eeprom_calib_ver;
322 int num_of_queues; /* def: HW dependent */
323 int num_of_ampdu_queues;/* def: HW dependent */
324 const struct iwl_ops *ops; 380 const struct iwl_ops *ops;
381 /* module based parameters which can be set from modprobe cmd */
325 const struct iwl_mod_params *mod_params; 382 const struct iwl_mod_params *mod_params;
326 u8 valid_tx_ant; 383 /* params not likely to change within a device family */
327 u8 valid_rx_ant; 384 struct iwl_base_params *base_params;
328 385 /* params likely to change within a device family */
329 /* for iwl_apm_init() */ 386 struct iwl_ht_params *ht_params;
330 u32 pll_cfg_val; 387 struct iwl_bt_params *bt_params;
331 bool set_l0s; 388 enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
332 bool use_bsm; 389 const bool need_dc_calib; /* if used set to true */
333 390 const bool need_temp_offset_calib; /* if used set to true */
334 bool use_isr_legacy;
335 enum iwl_pa_type pa_type;
336 const u16 max_ll_items;
337 const bool shadow_ram_support;
338 const bool ht_greenfield_support;
339 u16 led_compensation;
340 const bool broken_powersave;
341 bool use_rts_for_aggregation;
342 int chain_noise_num_beacons;
343 const bool supports_idle;
344 bool adv_thermal_throttle;
345 bool support_ct_kill_exit;
346 const bool support_wimax_coexist;
347 u8 plcp_delta_threshold;
348 s32 chain_noise_scale;
349 /* timer period for monitor the driver queues */
350 u32 monitor_recover_period;
351 bool temperature_kelvin;
352 u32 max_event_log_size;
353 const bool tx_power_by_driver;
354 const bool ucode_tracing;
355 const bool sensitivity_calib_by_driver;
356 const bool chain_noise_calib_by_driver;
357 u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; 391 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
358 u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; 392 u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
359 bool advanced_bt_coexist;
360 u8 bt_init_traffic_load;
361 u8 bt_prio_boost;
362 const bool need_dc_calib;
363 const bool bt_statistics;
364 u16 agg_time_limit;
365 u8 ampdu_factor;
366 u8 ampdu_density;
367}; 393};
368 394
369/*************************** 395/***************************
@@ -372,7 +398,6 @@ struct iwl_cfg {
372 398
373struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 399struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
374 struct ieee80211_ops *hw_ops); 400 struct ieee80211_ops *hw_ops);
375void iwl_activate_qos(struct iwl_priv *priv);
376int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 401int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
377 const struct ieee80211_tx_queue_params *params); 402 const struct ieee80211_tx_queue_params *params);
378int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw); 403int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
@@ -380,7 +405,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
380 int hw_decrypt); 405 int hw_decrypt);
381int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 406int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
382int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 407int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
383void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
384int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, 408int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
385 struct iwl_rxon_context *ctx); 409 struct iwl_rxon_context *ctx);
386void iwl_set_flags_for_band(struct iwl_priv *priv, 410void iwl_set_flags_for_band(struct iwl_priv *priv,
@@ -406,7 +430,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
406 struct ieee80211_vif *vif, 430 struct ieee80211_vif *vif,
407 struct ieee80211_bss_conf *bss_conf, 431 struct ieee80211_bss_conf *bss_conf,
408 u32 changes); 432 u32 changes);
409int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
410int iwl_mac_add_interface(struct ieee80211_hw *hw, 433int iwl_mac_add_interface(struct ieee80211_hw *hw,
411 struct ieee80211_vif *vif); 434 struct ieee80211_vif *vif);
412void iwl_mac_remove_interface(struct ieee80211_hw *hw, 435void iwl_mac_remove_interface(struct ieee80211_hw *hw,
@@ -483,7 +506,6 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
483******************************************************/ 506******************************************************/
484void iwl_cmd_queue_free(struct iwl_priv *priv); 507void iwl_cmd_queue_free(struct iwl_priv *priv);
485int iwl_rx_queue_alloc(struct iwl_priv *priv); 508int iwl_rx_queue_alloc(struct iwl_priv *priv);
486void iwl_rx_handle(struct iwl_priv *priv);
487void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 509void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
488 struct iwl_rx_queue *q); 510 struct iwl_rx_queue *q);
489int iwl_rx_queue_space(const struct iwl_rx_queue *q); 511int iwl_rx_queue_space(const struct iwl_rx_queue *q);
@@ -501,12 +523,6 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
501/***************************************************** 523/*****************************************************
502* TX 524* TX
503******************************************************/ 525******************************************************/
504void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
505int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
506 struct iwl_tx_queue *txq,
507 dma_addr_t addr, u16 len, u8 reset, u8 pad);
508int iwl_hw_tx_queue_init(struct iwl_priv *priv,
509 struct iwl_tx_queue *txq);
510void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 526void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
511int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 527int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
512 int slots_num, u32 txq_id); 528 int slots_num, u32 txq_id);
@@ -522,37 +538,16 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
522 * Rate 538 * Rate
523 ******************************************************************************/ 539 ******************************************************************************/
524 540
525int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
526
527u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, 541u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
528 struct iwl_rxon_context *ctx); 542 struct iwl_rxon_context *ctx);
529 543
530u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
531
532static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
533{
534 return BIT(ant_idx) << RATE_MCS_ANT_POS;
535}
536
537static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
538{
539 return le32_to_cpu(rate_n_flags) & 0xFF;
540}
541static inline u32 iwl_hw_get_rate_n_flags(__le32 rate_n_flags)
542{
543 return le32_to_cpu(rate_n_flags) & 0x1FFFF;
544}
545static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
546{
547 return cpu_to_le32(flags|(u32)rate);
548}
549
550/******************************************************************************* 544/*******************************************************************************
551 * Scanning 545 * Scanning
552 ******************************************************************************/ 546 ******************************************************************************/
553void iwl_init_scan_params(struct iwl_priv *priv); 547void iwl_init_scan_params(struct iwl_priv *priv);
554int iwl_scan_cancel(struct iwl_priv *priv); 548int iwl_scan_cancel(struct iwl_priv *priv);
555int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 549int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
550void iwl_force_scan_end(struct iwl_priv *priv);
556int iwl_mac_hw_scan(struct ieee80211_hw *hw, 551int iwl_mac_hw_scan(struct ieee80211_hw *hw,
557 struct ieee80211_vif *vif, 552 struct ieee80211_vif *vif,
558 struct cfg80211_scan_request *req); 553 struct cfg80211_scan_request *req);
@@ -568,6 +563,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
568 enum ieee80211_band band, 563 enum ieee80211_band band,
569 struct ieee80211_vif *vif); 564 struct ieee80211_vif *vif);
570void iwl_setup_scan_deferred_work(struct iwl_priv *priv); 565void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
566void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
571 567
572/* For faster active scanning, scan will move to the next channel if fewer than 568/* For faster active scanning, scan will move to the next channel if fewer than
573 * PLCP_QUIET_THRESH packets are heard on this channel within 569 * PLCP_QUIET_THRESH packets are heard on this channel within
@@ -580,13 +576,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
580 576
581#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) 577#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
582 578
583/*******************************************************************************
584 * Calibrations - implemented in iwl-calib.c
585 ******************************************************************************/
586int iwl_send_calib_results(struct iwl_priv *priv);
587int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
588void iwl_calib_free_results(struct iwl_priv *priv);
589
590/***************************************************** 579/*****************************************************
591 * S e n d i n g H o s t C o m m a n d s * 580 * S e n d i n g H o s t C o m m a n d s *
592 *****************************************************/ 581 *****************************************************/
@@ -636,8 +625,6 @@ int iwl_pci_resume(struct pci_dev *pdev);
636void iwl_dump_nic_error_log(struct iwl_priv *priv); 625void iwl_dump_nic_error_log(struct iwl_priv *priv);
637int iwl_dump_nic_event_log(struct iwl_priv *priv, 626int iwl_dump_nic_event_log(struct iwl_priv *priv,
638 bool full_log, char **buf, bool display); 627 bool full_log, char **buf, bool display);
639void iwl_dump_csr(struct iwl_priv *priv);
640int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
641#ifdef CONFIG_IWLWIFI_DEBUG 628#ifdef CONFIG_IWLWIFI_DEBUG
642void iwl_print_rx_config_cmd(struct iwl_priv *priv, 629void iwl_print_rx_config_cmd(struct iwl_priv *priv,
643 struct iwl_rxon_context *ctx); 630 struct iwl_rxon_context *ctx);
@@ -723,8 +710,6 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
723extern void iwl_send_bt_config(struct iwl_priv *priv); 710extern void iwl_send_bt_config(struct iwl_priv *priv);
724extern int iwl_send_statistics_request(struct iwl_priv *priv, 711extern int iwl_send_statistics_request(struct iwl_priv *priv,
725 u8 flags, bool clear); 712 u8 flags, bool clear);
726extern int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
727 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
728void iwl_apm_stop(struct iwl_priv *priv); 713void iwl_apm_stop(struct iwl_priv *priv);
729int iwl_apm_init(struct iwl_priv *priv); 714int iwl_apm_init(struct iwl_priv *priv);
730 715
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index ecf98e7ac4ed..2aa15ab13892 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -371,7 +371,8 @@
371#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000) 371#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000)
372#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001) 372#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001)
373#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) 373#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
374#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) 374#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
375#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
375 376
376/* GIO Chicken Bits (PCI Express bus link power management) */ 377/* GIO Chicken Bits (PCI Express bus link power management) */
377#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 378#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 0ee8f516c4ab..96d9085639e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -39,7 +39,6 @@
39#include "iwl-debug.h" 39#include "iwl-debug.h"
40#include "iwl-core.h" 40#include "iwl-core.h"
41#include "iwl-io.h" 41#include "iwl-io.h"
42#include "iwl-calib.h"
43 42
44/* create and remove of files */ 43/* create and remove of files */
45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 44#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -356,7 +355,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
356 const u8 *ptr; 355 const u8 *ptr;
357 char *buf; 356 char *buf;
358 u16 eeprom_ver; 357 u16 eeprom_ver;
359 size_t eeprom_len = priv->cfg->eeprom_size; 358 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
360 buf_size = 4 * eeprom_len + 256; 359 buf_size = 4 * eeprom_len + 256;
361 360
362 if (eeprom_len % 16) { 361 if (eeprom_len % 16) {
@@ -575,10 +574,10 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
575 priv->isr_stats.hw); 574 priv->isr_stats.hw);
576 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 575 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
577 priv->isr_stats.sw); 576 priv->isr_stats.sw);
578 if (priv->isr_stats.sw > 0) { 577 if (priv->isr_stats.sw || priv->isr_stats.hw) {
579 pos += scnprintf(buf + pos, bufsz - pos, 578 pos += scnprintf(buf + pos, bufsz - pos,
580 "\tLast Restarting Code: 0x%X\n", 579 "\tLast Restarting Code: 0x%X\n",
581 priv->isr_stats.sw_err); 580 priv->isr_stats.err_code);
582 } 581 }
583#ifdef CONFIG_IWLWIFI_DEBUG 582#ifdef CONFIG_IWLWIFI_DEBUG
584 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 583 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
@@ -872,7 +871,7 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
872 struct iwl_rx_queue *rxq = &priv->rxq; 871 struct iwl_rx_queue *rxq = &priv->rxq;
873 char *buf; 872 char *buf;
874 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + 873 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
875 (priv->cfg->num_of_queues * 32 * 8) + 400; 874 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
876 const u8 *ptr; 875 const u8 *ptr;
877 ssize_t ret; 876 ssize_t ret;
878 877
@@ -971,7 +970,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
971 int pos = 0; 970 int pos = 0;
972 int cnt; 971 int cnt;
973 int ret; 972 int ret;
974 const size_t bufsz = sizeof(char) * 64 * priv->cfg->num_of_queues; 973 const size_t bufsz = sizeof(char) * 64 *
974 priv->cfg->base_params->num_of_queues;
975 975
976 if (!priv->txq) { 976 if (!priv->txq) {
977 IWL_ERR(priv, "txq not ready\n"); 977 IWL_ERR(priv, "txq not ready\n");
@@ -1415,7 +1415,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
1415 const size_t bufsz = sizeof(buf); 1415 const size_t bufsz = sizeof(buf);
1416 1416
1417 pos += scnprintf(buf + pos, bufsz - pos, "%u\n", 1417 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
1418 priv->cfg->plcp_delta_threshold); 1418 priv->cfg->base_params->plcp_delta_threshold);
1419 1419
1420 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1420 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1421} 1421}
@@ -1437,10 +1437,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
1437 return -EINVAL; 1437 return -EINVAL;
1438 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || 1438 if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
1439 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) 1439 (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
1440 priv->cfg->plcp_delta_threshold = 1440 priv->cfg->base_params->plcp_delta_threshold =
1441 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; 1441 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
1442 else 1442 else
1443 priv->cfg->plcp_delta_threshold = plcp; 1443 priv->cfg->base_params->plcp_delta_threshold = plcp;
1444 return count; 1444 return count;
1445} 1445}
1446 1446
@@ -1550,13 +1550,14 @@ static ssize_t iwl_dbgfs_monitor_period_write(struct file *file,
1550 if (sscanf(buf, "%d", &period) != 1) 1550 if (sscanf(buf, "%d", &period) != 1)
1551 return -EINVAL; 1551 return -EINVAL;
1552 if (period < 0 || period > IWL_MAX_MONITORING_PERIOD) 1552 if (period < 0 || period > IWL_MAX_MONITORING_PERIOD)
1553 priv->cfg->monitor_recover_period = IWL_DEF_MONITORING_PERIOD; 1553 priv->cfg->base_params->monitor_recover_period =
1554 IWL_DEF_MONITORING_PERIOD;
1554 else 1555 else
1555 priv->cfg->monitor_recover_period = period; 1556 priv->cfg->base_params->monitor_recover_period = period;
1556 1557
1557 if (priv->cfg->monitor_recover_period) 1558 if (priv->cfg->base_params->monitor_recover_period)
1558 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies( 1559 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
1559 priv->cfg->monitor_recover_period)); 1560 priv->cfg->base_params->monitor_recover_period));
1560 else 1561 else
1561 del_timer_sync(&priv->monitor_recover); 1562 del_timer_sync(&priv->monitor_recover);
1562 return count; 1563 return count;
@@ -1604,6 +1605,64 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1604 return ret; 1605 return ret;
1605} 1606}
1606 1607
1608static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
1609 char __user *user_buf,
1610 size_t count, loff_t *ppos)
1611{
1612 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1613
1614 int pos = 0;
1615 char buf[40];
1616 const size_t bufsz = sizeof(buf);
1617
1618 if (priv->cfg->ht_params)
1619 pos += scnprintf(buf + pos, bufsz - pos,
1620 "use %s for aggregation\n",
1621 (priv->cfg->ht_params->use_rts_for_aggregation) ?
1622 "rts/cts" : "cts-to-self");
1623 else
1624 pos += scnprintf(buf + pos, bufsz - pos, "N/A");
1625
1626 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1627}
1628
1629static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
1630 const char __user *user_buf,
1631 size_t count, loff_t *ppos) {
1632
1633 struct iwl_priv *priv = file->private_data;
1634 char buf[8];
1635 int buf_size;
1636 int rts;
1637
1638 if (!priv->cfg->ht_params)
1639 return -EINVAL;
1640
1641 memset(buf, 0, sizeof(buf));
1642 buf_size = min(count, sizeof(buf) - 1);
1643 if (copy_from_user(buf, user_buf, buf_size))
1644 return -EFAULT;
1645 if (sscanf(buf, "%d", &rts) != 1)
1646 return -EINVAL;
1647 if (rts)
1648 priv->cfg->ht_params->use_rts_for_aggregation = true;
1649 else
1650 priv->cfg->ht_params->use_rts_for_aggregation = false;
1651 return count;
1652}
1653
1654static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1655 char __user *user_buf,
1656 size_t count, loff_t *ppos)
1657{
1658 struct iwl_priv *priv = file->private_data;
1659
1660 if (priv->cfg->ops->lib->debugfs_ops.reply_tx_error)
1661 return priv->cfg->ops->lib->debugfs_ops.reply_tx_error(
1662 file, user_buf, count, ppos);
1663 else
1664 return -ENODATA;
1665}
1607DEBUGFS_READ_FILE_OPS(rx_statistics); 1666DEBUGFS_READ_FILE_OPS(rx_statistics);
1608DEBUGFS_READ_FILE_OPS(tx_statistics); 1667DEBUGFS_READ_FILE_OPS(tx_statistics);
1609DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1668DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1629,6 +1688,8 @@ DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
1629DEBUGFS_READ_FILE_OPS(ucode_bt_stats); 1688DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
1630DEBUGFS_WRITE_FILE_OPS(monitor_period); 1689DEBUGFS_WRITE_FILE_OPS(monitor_period);
1631DEBUGFS_READ_FILE_OPS(bt_traffic); 1690DEBUGFS_READ_FILE_OPS(bt_traffic);
1691DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
1692DEBUGFS_READ_FILE_OPS(reply_tx_error);
1632 1693
1633/* 1694/*
1634 * Create the debugfs files and directories 1695 * Create the debugfs files and directories
@@ -1664,7 +1725,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1664 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); 1725 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1665 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); 1726 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1666 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR); 1727 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
1667 if (!priv->cfg->broken_powersave) { 1728 if (!priv->cfg->base_params->broken_powersave) {
1668 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, 1729 DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
1669 S_IWUSR | S_IRUSR); 1730 S_IWUSR | S_IRUSR);
1670 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); 1731 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
@@ -1689,27 +1750,29 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1689 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); 1750 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1690 if (priv->cfg->ops->lib->dev_txfifo_flush) 1751 if (priv->cfg->ops->lib->dev_txfifo_flush)
1691 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR); 1752 DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, S_IWUSR);
1753 DEBUGFS_ADD_FILE(protection_mode, dir_debug, S_IWUSR | S_IRUSR);
1692 1754
1693 if (priv->cfg->sensitivity_calib_by_driver) 1755 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1694 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); 1756 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1695 if (priv->cfg->chain_noise_calib_by_driver) 1757 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1696 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 1758 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1697 if (priv->cfg->ucode_tracing) 1759 if (priv->cfg->base_params->ucode_tracing)
1698 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 1760 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
1699 if (priv->cfg->bt_statistics) 1761 if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics)
1700 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); 1762 DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
1763 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
1701 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 1764 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1702 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 1765 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1703 DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR); 1766 DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR);
1704 if (priv->cfg->advanced_bt_coexist) 1767 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1705 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 1768 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
1706 if (priv->cfg->sensitivity_calib_by_driver) 1769 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1707 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 1770 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1708 &priv->disable_sens_cal); 1771 &priv->disable_sens_cal);
1709 if (priv->cfg->chain_noise_calib_by_driver) 1772 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1710 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, 1773 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1711 &priv->disable_chain_noise_cal); 1774 &priv->disable_chain_noise_cal);
1712 if (priv->cfg->tx_power_by_driver) 1775 if (priv->cfg->base_params->tx_power_by_driver)
1713 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, 1776 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1714 &priv->disable_tx_power_cal); 1777 &priv->disable_tx_power_cal);
1715 return 0; 1778 return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 4dd38b7b8b74..70e07fa48405 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -282,13 +282,6 @@ struct iwl_channel_info {
282 */ 282 */
283#define IWL_IPAN_MCAST_QUEUE 8 283#define IWL_IPAN_MCAST_QUEUE 8
284 284
285/* Power management (not Tx power) structures */
286
287enum iwl_pwr_src {
288 IWL_PWR_SRC_VMAIN,
289 IWL_PWR_SRC_VAUX,
290};
291
292#define IEEE80211_DATA_LEN 2304 285#define IEEE80211_DATA_LEN 2304
293#define IEEE80211_4ADDR_LEN 30 286#define IEEE80211_4ADDR_LEN 30
294#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 287#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
@@ -684,6 +677,7 @@ struct iwl_sensitivity_ranges {
684 * @ct_kill_threshold: temperature threshold 677 * @ct_kill_threshold: temperature threshold
685 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time 678 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
686 * @calib_init_cfg: setup initial calibrations for the hw 679 * @calib_init_cfg: setup initial calibrations for the hw
680 * @calib_rt_cfg: setup runtime calibrations for the hw
687 * @struct iwl_sensitivity_ranges: range of sensitivity values 681 * @struct iwl_sensitivity_ranges: range of sensitivity values
688 */ 682 */
689struct iwl_hw_params { 683struct iwl_hw_params {
@@ -710,6 +704,7 @@ struct iwl_hw_params {
710 /* for 1000, 6000 series and up */ 704 /* for 1000, 6000 series and up */
711 u16 beacon_time_tsf_bits; 705 u16 beacon_time_tsf_bits;
712 u32 calib_init_cfg; 706 u32 calib_init_cfg;
707 u32 calib_rt_cfg;
713 const struct iwl_sensitivity_ranges *sens; 708 const struct iwl_sensitivity_ranges *sens;
714}; 709};
715 710
@@ -730,7 +725,6 @@ struct iwl_hw_params {
730 * 725 *
731 ****************************************************************************/ 726 ****************************************************************************/
732extern void iwl_update_chain_flags(struct iwl_priv *priv); 727extern void iwl_update_chain_flags(struct iwl_priv *priv);
733extern int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
734extern const u8 iwl_bcast_addr[ETH_ALEN]; 728extern const u8 iwl_bcast_addr[ETH_ALEN];
735extern int iwl_rxq_stop(struct iwl_priv *priv); 729extern int iwl_rxq_stop(struct iwl_priv *priv);
736extern void iwl_txq_ctx_stop(struct iwl_priv *priv); 730extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
@@ -841,6 +835,7 @@ enum iwl_calib {
841 IWL_CALIB_TX_IQ, 835 IWL_CALIB_TX_IQ,
842 IWL_CALIB_TX_IQ_PERD, 836 IWL_CALIB_TX_IQ_PERD,
843 IWL_CALIB_BASE_BAND, 837 IWL_CALIB_BASE_BAND,
838 IWL_CALIB_TEMP_OFFSET,
844 IWL_CALIB_MAX 839 IWL_CALIB_MAX
845}; 840};
846 841
@@ -945,7 +940,7 @@ enum iwl_pa_type {
945struct isr_statistics { 940struct isr_statistics {
946 u32 hw; 941 u32 hw;
947 u32 sw; 942 u32 sw;
948 u32 sw_err; 943 u32 err_code;
949 u32 sch; 944 u32 sch;
950 u32 alive; 945 u32 alive;
951 u32 rfkill; 946 u32 rfkill;
@@ -957,6 +952,50 @@ struct isr_statistics {
957 u32 unhandled; 952 u32 unhandled;
958}; 953};
959 954
955/* reply_tx_statistics (for _agn devices) */
956struct reply_tx_error_statistics {
957 u32 pp_delay;
958 u32 pp_few_bytes;
959 u32 pp_bt_prio;
960 u32 pp_quiet_period;
961 u32 pp_calc_ttak;
962 u32 int_crossed_retry;
963 u32 short_limit;
964 u32 long_limit;
965 u32 fifo_underrun;
966 u32 drain_flow;
967 u32 rfkill_flush;
968 u32 life_expire;
969 u32 dest_ps;
970 u32 host_abort;
971 u32 bt_retry;
972 u32 sta_invalid;
973 u32 frag_drop;
974 u32 tid_disable;
975 u32 fifo_flush;
976 u32 insuff_cf_poll;
977 u32 fail_hw_drop;
978 u32 sta_color_mismatch;
979 u32 unknown;
980};
981
982/* reply_agg_tx_statistics (for _agn devices) */
983struct reply_agg_tx_error_statistics {
984 u32 underrun;
985 u32 bt_prio;
986 u32 few_bytes;
987 u32 abort;
988 u32 last_sent_ttl;
989 u32 last_sent_try;
990 u32 last_sent_bt_kill;
991 u32 scd_query;
992 u32 bad_crc32;
993 u32 response;
994 u32 dump_tx;
995 u32 delay_tx;
996 u32 unknown;
997};
998
960#ifdef CONFIG_IWLWIFI_DEBUGFS 999#ifdef CONFIG_IWLWIFI_DEBUGFS
961/* management statistics */ 1000/* management statistics */
962enum iwl_mgmt_stats { 1001enum iwl_mgmt_stats {
@@ -1116,6 +1155,13 @@ struct iwl_rxon_context {
1116 const u8 *ac_to_queue; 1155 const u8 *ac_to_queue;
1117 u8 mcast_queue; 1156 u8 mcast_queue;
1118 1157
1158 /*
1159 * We could use the vif to indicate active, but we
1160 * also need it to be active during disabling when
1161 * we already removed the vif for type setting.
1162 */
1163 bool always_active, is_active;
1164
1119 enum iwl_rxon_context_id ctxid; 1165 enum iwl_rxon_context_id ctxid;
1120 1166
1121 u32 interface_modes, exclusive_interface_modes; 1167 u32 interface_modes, exclusive_interface_modes;
@@ -1337,8 +1383,6 @@ struct iwl_priv {
1337 1383
1338 enum nl80211_iftype iw_mode; 1384 enum nl80211_iftype iw_mode;
1339 1385
1340 struct sk_buff *ibss_beacon;
1341
1342 /* Last Rx'd beacon timestamp */ 1386 /* Last Rx'd beacon timestamp */
1343 u64 timestamp; 1387 u64 timestamp;
1344 1388
@@ -1408,6 +1452,9 @@ struct iwl_priv {
1408 1452
1409 struct iwl_notif_statistics statistics; 1453 struct iwl_notif_statistics statistics;
1410 struct iwl_bt_notif_statistics statistics_bt; 1454 struct iwl_bt_notif_statistics statistics_bt;
1455 /* counts reply_tx error */
1456 struct reply_tx_error_statistics reply_tx_stats;
1457 struct reply_agg_tx_error_statistics reply_agg_tx_stats;
1411#ifdef CONFIG_IWLWIFI_DEBUGFS 1458#ifdef CONFIG_IWLWIFI_DEBUGFS
1412 struct iwl_notif_statistics accum_statistics; 1459 struct iwl_notif_statistics accum_statistics;
1413 struct iwl_notif_statistics delta_statistics; 1460 struct iwl_notif_statistics delta_statistics;
@@ -1447,8 +1494,10 @@ struct iwl_priv {
1447 struct work_struct scan_completed; 1494 struct work_struct scan_completed;
1448 struct work_struct rx_replenish; 1495 struct work_struct rx_replenish;
1449 struct work_struct abort_scan; 1496 struct work_struct abort_scan;
1497
1450 struct work_struct beacon_update; 1498 struct work_struct beacon_update;
1451 struct iwl_rxon_context *beacon_ctx; 1499 struct iwl_rxon_context *beacon_ctx;
1500 struct sk_buff *beacon_skb;
1452 1501
1453 struct work_struct tt_work; 1502 struct work_struct tt_work;
1454 struct work_struct ct_enter; 1503 struct work_struct ct_enter;
@@ -1510,7 +1559,6 @@ static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1510} 1559}
1511 1560
1512#ifdef CONFIG_IWLWIFI_DEBUG 1561#ifdef CONFIG_IWLWIFI_DEBUG
1513const char *iwl_get_tx_fail_reason(u32 status);
1514/* 1562/*
1515 * iwl_get_debug_level: Return active debug level for device 1563 * iwl_get_debug_level: Return active debug level for device
1516 * 1564 *
@@ -1526,8 +1574,6 @@ static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1526 return iwl_debug_level; 1574 return iwl_debug_level;
1527} 1575}
1528#else 1576#else
1529static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
1530
1531static inline u32 iwl_get_debug_level(struct iwl_priv *priv) 1577static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1532{ 1578{
1533 return iwl_debug_level; 1579 return iwl_debug_level;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a45d02e555cf..87cd10ff285d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -136,85 +136,13 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137}; 137};
138 138
139/**
140 * struct iwl_txpwr_section: eeprom section information
141 * @offset: indirect address into eeprom image
142 * @count: number of "struct iwl_eeprom_enhanced_txpwr" in this section
143 * @band: band type for the section
144 * @is_common - true: common section, false: channel section
145 * @is_cck - true: cck section, false: not cck section
146 * @is_ht_40 - true: all channel in the section are HT40 channel,
147 * false: legacy or HT 20 MHz
148 * ignore if it is common section
149 * @iwl_eeprom_section_channel: channel array in the section,
150 * ignore if common section
151 */
152struct iwl_txpwr_section {
153 u32 offset;
154 u8 count;
155 enum ieee80211_band band;
156 bool is_common;
157 bool is_cck;
158 bool is_ht40;
159 u8 iwl_eeprom_section_channel[EEPROM_MAX_TXPOWER_SECTION_ELEMENTS];
160};
161
162/**
163 * section 1 - 3 are regulatory tx power apply to all channels based on
164 * modulation: CCK, OFDM
165 * Band: 2.4GHz, 5.2GHz
166 * section 4 - 10 are regulatory tx power apply to specified channels
167 * For example:
168 * 1L - Channel 1 Legacy
169 * 1HT - Channel 1 HT
170 * (1,+1) - Channel 1 HT40 "_above_"
171 *
172 * Section 1: all CCK channels
173 * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40) channels
174 * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels
175 * Section 4: 2.4 GHz 20MHz channels: 1L, 1HT, 2L, 2HT, 10L, 10HT, 11L, 11HT
176 * Section 5: 2.4 GHz 40MHz channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1)
177 * Section 6: 5.2 GHz 20MHz channels: 36L, 64L, 100L, 36HT, 64HT, 100HT
178 * Section 7: 5.2 GHz 40MHz channels: (36,+1) (60,+1) (100,+1)
179 * Section 8: 2.4 GHz channel: 13L, 13HT
180 * Section 9: 2.4 GHz channel: 140L, 140HT
181 * Section 10: 2.4 GHz 40MHz channels: (132,+1) (44,+1)
182 *
183 */
184static const struct iwl_txpwr_section enhinfo[] = {
185 { EEPROM_LB_CCK_20_COMMON, 1, IEEE80211_BAND_2GHZ, true, true, false },
186 { EEPROM_LB_OFDM_COMMON, 3, IEEE80211_BAND_2GHZ, true, false, false },
187 { EEPROM_HB_OFDM_COMMON, 3, IEEE80211_BAND_5GHZ, true, false, false },
188 { EEPROM_LB_OFDM_20_BAND, 8, IEEE80211_BAND_2GHZ,
189 false, false, false,
190 {1, 1, 2, 2, 10, 10, 11, 11 } },
191 { EEPROM_LB_OFDM_HT40_BAND, 5, IEEE80211_BAND_2GHZ,
192 false, false, true,
193 { 1, 2, 6, 7, 9 } },
194 { EEPROM_HB_OFDM_20_BAND, 6, IEEE80211_BAND_5GHZ,
195 false, false, false,
196 { 36, 64, 100, 36, 64, 100 } },
197 { EEPROM_HB_OFDM_HT40_BAND, 3, IEEE80211_BAND_5GHZ,
198 false, false, true,
199 { 36, 60, 100 } },
200 { EEPROM_LB_OFDM_20_CHANNEL_13, 2, IEEE80211_BAND_2GHZ,
201 false, false, false,
202 { 13, 13 } },
203 { EEPROM_HB_OFDM_20_CHANNEL_140, 2, IEEE80211_BAND_5GHZ,
204 false, false, false,
205 { 140, 140 } },
206 { EEPROM_HB_OFDM_HT40_BAND_1, 2, IEEE80211_BAND_5GHZ,
207 false, false, true,
208 { 132, 44 } },
209};
210
211/****************************************************************************** 139/******************************************************************************
212 * 140 *
213 * EEPROM related functions 141 * EEPROM related functions
214 * 142 *
215******************************************************************************/ 143******************************************************************************/
216 144
217int iwlcore_eeprom_verify_signature(struct iwl_priv *priv) 145static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
218{ 146{
219 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; 147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
220 int ret = 0; 148 int ret = 0;
@@ -246,7 +174,6 @@ int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
246 } 174 }
247 return ret; 175 return ret;
248} 176}
249EXPORT_SYMBOL(iwlcore_eeprom_verify_signature);
250 177
251static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) 178static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
252{ 179{
@@ -290,49 +217,9 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv)
290 return nvm_type; 217 return nvm_type;
291} 218}
292 219
293/*
294 * The device's EEPROM semaphore prevents conflicts between driver and uCode
295 * when accessing the EEPROM; each access is a series of pulses to/from the
296 * EEPROM chip, not a single event, so even reads could conflict if they
297 * weren't arbitrated by the semaphore.
298 */
299int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
300{
301 u16 count;
302 int ret;
303
304 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
305 /* Request semaphore */
306 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
307 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
308
309 /* See if we got it */
310 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
311 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
312 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
313 EEPROM_SEM_TIMEOUT);
314 if (ret >= 0) {
315 IWL_DEBUG_IO(priv, "Acquired semaphore after %d tries.\n",
316 count+1);
317 return ret;
318 }
319 }
320
321 return ret;
322}
323EXPORT_SYMBOL(iwlcore_eeprom_acquire_semaphore);
324
325void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
326{
327 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
328 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
329
330}
331EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
332
333const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) 220const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
334{ 221{
335 BUG_ON(offset >= priv->cfg->eeprom_size); 222 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
336 return &priv->eeprom[offset]; 223 return &priv->eeprom[offset];
337} 224}
338EXPORT_SYMBOL(iwlcore_eeprom_query_addr); 225EXPORT_SYMBOL(iwlcore_eeprom_query_addr);
@@ -364,7 +251,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
364 * CSR auto clock gate disable bit - 251 * CSR auto clock gate disable bit -
365 * this is only applicable for HW with OTP shadow RAM 252 * this is only applicable for HW with OTP shadow RAM
366 */ 253 */
367 if (priv->cfg->shadow_ram_support) 254 if (priv->cfg->base_params->shadow_ram_support)
368 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG, 255 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
369 CSR_RESET_LINK_PWR_MGMT_DISABLED); 256 CSR_RESET_LINK_PWR_MGMT_DISABLED);
370 } 257 }
@@ -484,13 +371,27 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
484 } 371 }
485 /* more in the link list, continue */ 372 /* more in the link list, continue */
486 usedblocks++; 373 usedblocks++;
487 } while (usedblocks <= priv->cfg->max_ll_items); 374 } while (usedblocks <= priv->cfg->base_params->max_ll_items);
488 375
489 /* OTP has no valid blocks */ 376 /* OTP has no valid blocks */
490 IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n"); 377 IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n");
491 return -EINVAL; 378 return -EINVAL;
492} 379}
493 380
381const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
382{
383 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
384}
385EXPORT_SYMBOL(iwl_eeprom_query_addr);
386
387u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
388{
389 if (!priv->eeprom)
390 return 0;
391 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
392}
393EXPORT_SYMBOL(iwl_eeprom_query16);
394
494/** 395/**
495 * iwl_eeprom_init - read EEPROM contents 396 * iwl_eeprom_init - read EEPROM contents
496 * 397 *
@@ -512,8 +413,8 @@ int iwl_eeprom_init(struct iwl_priv *priv)
512 if (priv->nvm_device_type == -ENOENT) 413 if (priv->nvm_device_type == -ENOENT)
513 return -ENOENT; 414 return -ENOENT;
514 /* allocate eeprom */ 415 /* allocate eeprom */
515 IWL_DEBUG_INFO(priv, "NVM size = %d\n", priv->cfg->eeprom_size); 416 sz = priv->cfg->base_params->eeprom_size;
516 sz = priv->cfg->eeprom_size; 417 IWL_DEBUG_INFO(priv, "NVM size = %d\n", sz);
517 priv->eeprom = kzalloc(sz, GFP_KERNEL); 418 priv->eeprom = kzalloc(sz, GFP_KERNEL);
518 if (!priv->eeprom) { 419 if (!priv->eeprom) {
519 ret = -ENOMEM; 420 ret = -ENOMEM;
@@ -523,7 +424,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
523 424
524 priv->cfg->ops->lib->apm_ops.init(priv); 425 priv->cfg->ops->lib->apm_ops.init(priv);
525 426
526 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 427 ret = iwl_eeprom_verify_signature(priv);
527 if (ret < 0) { 428 if (ret < 0) {
528 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); 429 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
529 ret = -ENOENT; 430 ret = -ENOENT;
@@ -554,7 +455,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
554 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | 455 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
555 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); 456 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
556 /* traversing the linked list if no shadow ram supported */ 457 /* traversing the linked list if no shadow ram supported */
557 if (!priv->cfg->shadow_ram_support) { 458 if (!priv->cfg->base_params->shadow_ram_support) {
558 if (iwl_find_otp_image(priv, &validblockaddr)) { 459 if (iwl_find_otp_image(priv, &validblockaddr)) {
559 ret = -ENOENT; 460 ret = -ENOENT;
560 goto done; 461 goto done;
@@ -604,7 +505,7 @@ err:
604 if (ret) 505 if (ret)
605 iwl_eeprom_free(priv); 506 iwl_eeprom_free(priv);
606 /* Reset chip to save power until we load uCode during "up". */ 507 /* Reset chip to save power until we load uCode during "up". */
607 priv->cfg->ops->lib->apm_ops.stop(priv); 508 iwl_apm_stop(priv);
608alloc_err: 509alloc_err:
609 return ret; 510 return ret;
610} 511}
@@ -617,53 +518,6 @@ void iwl_eeprom_free(struct iwl_priv *priv)
617} 518}
618EXPORT_SYMBOL(iwl_eeprom_free); 519EXPORT_SYMBOL(iwl_eeprom_free);
619 520
620int iwl_eeprom_check_version(struct iwl_priv *priv)
621{
622 u16 eeprom_ver;
623 u16 calib_ver;
624
625 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
626 calib_ver = priv->cfg->ops->lib->eeprom_ops.calib_version(priv);
627
628 if (eeprom_ver < priv->cfg->eeprom_ver ||
629 calib_ver < priv->cfg->eeprom_calib_ver)
630 goto err;
631
632 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
633 eeprom_ver, calib_ver);
634
635 return 0;
636err:
637 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
638 eeprom_ver, priv->cfg->eeprom_ver,
639 calib_ver, priv->cfg->eeprom_calib_ver);
640 return -EINVAL;
641
642}
643EXPORT_SYMBOL(iwl_eeprom_check_version);
644
645const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
646{
647 return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset);
648}
649EXPORT_SYMBOL(iwl_eeprom_query_addr);
650
651u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
652{
653 if (!priv->eeprom)
654 return 0;
655 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
656}
657EXPORT_SYMBOL(iwl_eeprom_query16);
658
659void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
660{
661 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
662 EEPROM_MAC_ADDRESS);
663 memcpy(mac, addr, ETH_ALEN);
664}
665EXPORT_SYMBOL(iwl_eeprom_get_mac);
666
667static void iwl_init_band_reference(const struct iwl_priv *priv, 521static void iwl_init_band_reference(const struct iwl_priv *priv,
668 int eep_band, int *eeprom_ch_count, 522 int eep_band, int *eeprom_ch_count,
669 const struct iwl_eeprom_channel **eeprom_ch_info, 523 const struct iwl_eeprom_channel **eeprom_ch_info,
@@ -722,7 +576,6 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
722 576
723#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ 577#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
724 ? # x " " : "") 578 ? # x " " : "")
725
726/** 579/**
727 * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv. 580 * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
728 * 581 *
@@ -766,205 +619,6 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
766 return 0; 619 return 0;
767} 620}
768 621
769/**
770 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
771 * find the highest tx power from all chains for the channel
772 */
773static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
774 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
775 int element, s8 *max_txpower_in_half_dbm)
776{
777 s8 max_txpower_avg = 0; /* (dBm) */
778
779 IWL_DEBUG_INFO(priv, "%d - "
780 "chain_a: %d dB chain_b: %d dB "
781 "chain_c: %d dB mimo2: %d dB mimo3: %d dB\n",
782 element,
783 enhanced_txpower[element].chain_a_max >> 1,
784 enhanced_txpower[element].chain_b_max >> 1,
785 enhanced_txpower[element].chain_c_max >> 1,
786 enhanced_txpower[element].mimo2_max >> 1,
787 enhanced_txpower[element].mimo3_max >> 1);
788 /* Take the highest tx power from any valid chains */
789 if ((priv->cfg->valid_tx_ant & ANT_A) &&
790 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
791 max_txpower_avg = enhanced_txpower[element].chain_a_max;
792 if ((priv->cfg->valid_tx_ant & ANT_B) &&
793 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
794 max_txpower_avg = enhanced_txpower[element].chain_b_max;
795 if ((priv->cfg->valid_tx_ant & ANT_C) &&
796 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
797 max_txpower_avg = enhanced_txpower[element].chain_c_max;
798 if (((priv->cfg->valid_tx_ant == ANT_AB) |
799 (priv->cfg->valid_tx_ant == ANT_BC) |
800 (priv->cfg->valid_tx_ant == ANT_AC)) &&
801 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
802 max_txpower_avg = enhanced_txpower[element].mimo2_max;
803 if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
804 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
805 max_txpower_avg = enhanced_txpower[element].mimo3_max;
806
807 /*
808 * max. tx power in EEPROM is in 1/2 dBm format
809 * convert from 1/2 dBm to dBm (round-up convert)
810 * but we also do not want to loss 1/2 dBm resolution which
811 * will impact performance
812 */
813 *max_txpower_in_half_dbm = max_txpower_avg;
814 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
815}
816
817/**
818 * iwl_update_common_txpower: update channel tx power
819 * update tx power per band based on EEPROM enhanced tx power info.
820 */
821static s8 iwl_update_common_txpower(struct iwl_priv *priv,
822 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
823 int section, int element, s8 *max_txpower_in_half_dbm)
824{
825 struct iwl_channel_info *ch_info;
826 int ch;
827 bool is_ht40 = false;
828 s8 max_txpower_avg; /* (dBm) */
829
830 /* it is common section, contain all type (Legacy, HT and HT40)
831 * based on the element in the section to determine
832 * is it HT 40 or not
833 */
834 if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX)
835 is_ht40 = true;
836 max_txpower_avg =
837 iwl_get_max_txpower_avg(priv, enhanced_txpower,
838 element, max_txpower_in_half_dbm);
839
840 ch_info = priv->channel_info;
841
842 for (ch = 0; ch < priv->channel_count; ch++) {
843 /* find matching band and update tx power if needed */
844 if ((ch_info->band == enhinfo[section].band) &&
845 (ch_info->max_power_avg < max_txpower_avg) &&
846 (!is_ht40)) {
847 /* Update regulatory-based run-time data */
848 ch_info->max_power_avg = ch_info->curr_txpow =
849 max_txpower_avg;
850 ch_info->scan_power = max_txpower_avg;
851 }
852 if ((ch_info->band == enhinfo[section].band) && is_ht40 &&
853 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
854 /* Update regulatory-based run-time data */
855 ch_info->ht40_max_power_avg = max_txpower_avg;
856 }
857 ch_info++;
858 }
859 return max_txpower_avg;
860}
861
862/**
863 * iwl_update_channel_txpower: update channel tx power
864 * update channel tx power based on EEPROM enhanced tx power info.
865 */
866static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
867 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
868 int section, int element, s8 *max_txpower_in_half_dbm)
869{
870 struct iwl_channel_info *ch_info;
871 int ch;
872 u8 channel;
873 s8 max_txpower_avg; /* (dBm) */
874
875 channel = enhinfo[section].iwl_eeprom_section_channel[element];
876 max_txpower_avg =
877 iwl_get_max_txpower_avg(priv, enhanced_txpower,
878 element, max_txpower_in_half_dbm);
879
880 ch_info = priv->channel_info;
881 for (ch = 0; ch < priv->channel_count; ch++) {
882 /* find matching channel and update tx power if needed */
883 if (ch_info->channel == channel) {
884 if ((ch_info->max_power_avg < max_txpower_avg) &&
885 (!enhinfo[section].is_ht40)) {
886 /* Update regulatory-based run-time data */
887 ch_info->max_power_avg = max_txpower_avg;
888 ch_info->curr_txpow = max_txpower_avg;
889 ch_info->scan_power = max_txpower_avg;
890 }
891 if ((enhinfo[section].is_ht40) &&
892 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
893 /* Update regulatory-based run-time data */
894 ch_info->ht40_max_power_avg = max_txpower_avg;
895 }
896 break;
897 }
898 ch_info++;
899 }
900 return max_txpower_avg;
901}
902
903/**
904 * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info
905 */
906void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
907{
908 int eeprom_section_count = 0;
909 int section, element;
910 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower;
911 u32 offset;
912 s8 max_txpower_avg; /* (dBm) */
913 s8 max_txpower_in_half_dbm; /* (half-dBm) */
914
915 /* Loop through all the sections
916 * adjust bands and channel's max tx power
917 * Set the tx_power_user_lmt to the highest power
918 * supported by any channels and chains
919 */
920 for (section = 0; section < ARRAY_SIZE(enhinfo); section++) {
921 eeprom_section_count = enhinfo[section].count;
922 offset = enhinfo[section].offset;
923 enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *)
924 iwl_eeprom_query_addr(priv, offset);
925
926 /*
927 * check for valid entry -
928 * different version of EEPROM might contain different set
929 * of enhanced tx power table
930 * always check for valid entry before process
931 * the information
932 */
933 if (!enhanced_txpower->common || enhanced_txpower->reserved)
934 continue;
935
936 for (element = 0; element < eeprom_section_count; element++) {
937 if (enhinfo[section].is_common)
938 max_txpower_avg =
939 iwl_update_common_txpower(priv,
940 enhanced_txpower, section,
941 element,
942 &max_txpower_in_half_dbm);
943 else
944 max_txpower_avg =
945 iwl_update_channel_txpower(priv,
946 enhanced_txpower, section,
947 element,
948 &max_txpower_in_half_dbm);
949
950 /* Update the tx_power_user_lmt to the highest power
951 * supported by any channel */
952 if (max_txpower_avg > priv->tx_power_user_lmt)
953 priv->tx_power_user_lmt = max_txpower_avg;
954
955 /*
956 * Update the tx_power_lmt_in_half_dbm to
957 * the highest power supported by any channel
958 */
959 if (max_txpower_in_half_dbm >
960 priv->tx_power_lmt_in_half_dbm)
961 priv->tx_power_lmt_in_half_dbm =
962 max_txpower_in_half_dbm;
963 }
964 }
965}
966EXPORT_SYMBOL(iwlcore_eeprom_enhanced_txpower);
967
968#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ 622#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
969 ? # x " " : "") 623 ? # x " " : "")
970 624
@@ -1162,4 +816,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
1162 return NULL; 816 return NULL;
1163} 817}
1164EXPORT_SYMBOL(iwl_get_channel_info); 818EXPORT_SYMBOL(iwl_get_channel_info);
1165
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index a4772aff51fe..d9b590625ae4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -493,7 +493,6 @@ struct iwl_eeprom_calib_info {
493 493
494struct iwl_eeprom_ops { 494struct iwl_eeprom_ops {
495 const u32 regulatory_bands[7]; 495 const u32 regulatory_bands[7];
496 int (*verify_signature) (struct iwl_priv *priv);
497 int (*acquire_semaphore) (struct iwl_priv *priv); 496 int (*acquire_semaphore) (struct iwl_priv *priv);
498 void (*release_semaphore) (struct iwl_priv *priv); 497 void (*release_semaphore) (struct iwl_priv *priv);
499 u16 (*calib_version) (struct iwl_priv *priv); 498 u16 (*calib_version) (struct iwl_priv *priv);
@@ -502,18 +501,13 @@ struct iwl_eeprom_ops {
502}; 501};
503 502
504 503
505void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
506int iwl_eeprom_init(struct iwl_priv *priv); 504int iwl_eeprom_init(struct iwl_priv *priv);
507void iwl_eeprom_free(struct iwl_priv *priv); 505void iwl_eeprom_free(struct iwl_priv *priv);
508int iwl_eeprom_check_version(struct iwl_priv *priv); 506int iwl_eeprom_check_version(struct iwl_priv *priv);
509const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 507const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
510u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
511
512int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 508int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
513int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 509u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
514void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
515const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 510const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
516void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv);
517int iwl_init_channel_map(struct iwl_priv *priv); 511int iwl_init_channel_map(struct iwl_priv *priv);
518void iwl_free_channel_map(struct iwl_priv *priv); 512void iwl_free_channel_map(struct iwl_priv *priv);
519const struct iwl_channel_info *iwl_get_channel_info( 513const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 621abe3c5afc..1aaef70deaec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -44,11 +44,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
44 return &hw->conf; 44 return &hw->conf;
45} 45}
46 46
47static inline int iwl_check_bits(unsigned long field, unsigned long mask)
48{
49 return ((field & mask) == mask) ? 1 : 0;
50}
51
52static inline unsigned long elapsed_jiffies(unsigned long start, 47static inline unsigned long elapsed_jiffies(unsigned long start,
53 unsigned long end) 48 unsigned long end)
54{ 49{
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index db5bfcb036ca..86c2b6fed0c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -108,13 +108,13 @@ static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
108 BUG_ON(idx > IWL_MAX_BLINK_TBL); 108 BUG_ON(idx > IWL_MAX_BLINK_TBL);
109 109
110 IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n", 110 IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
111 priv->cfg->led_compensation); 111 priv->cfg->base_params->led_compensation);
112 led_cmd.on = 112 led_cmd.on =
113 iwl_blink_compensation(priv, blink_tbl[idx].on_time, 113 iwl_blink_compensation(priv, blink_tbl[idx].on_time,
114 priv->cfg->led_compensation); 114 priv->cfg->base_params->led_compensation);
115 led_cmd.off = 115 led_cmd.off =
116 iwl_blink_compensation(priv, blink_tbl[idx].off_time, 116 iwl_blink_compensation(priv, blink_tbl[idx].off_time,
117 priv->cfg->led_compensation); 117 priv->cfg->base_params->led_compensation);
118 118
119 return priv->cfg->ops->led->cmd(priv, &led_cmd); 119 return priv->cfg->ops->led->cmd(priv, &led_cmd);
120} 120}
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 63c0ab46261f..49d7788937a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -278,9 +278,9 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
278 278
279 dtimper = priv->hw->conf.ps_dtim_period ?: 1; 279 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
280 280
281 if (priv->cfg->broken_powersave) 281 if (priv->cfg->base_params->broken_powersave)
282 iwl_power_sleep_cam_cmd(priv, &cmd); 282 iwl_power_sleep_cam_cmd(priv, &cmd);
283 else if (priv->cfg->supports_idle && 283 else if (priv->cfg->base_params->supports_idle &&
284 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 284 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
285 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20); 285 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
286 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && 286 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 10be197b0f22..f436270ca39a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -36,7 +36,6 @@
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-sta.h" 37#include "iwl-sta.h"
38#include "iwl-io.h" 38#include "iwl-io.h"
39#include "iwl-calib.h"
40#include "iwl-helpers.h" 39#include "iwl-helpers.h"
41/************************** RX-FUNCTIONS ****************************/ 40/************************** RX-FUNCTIONS ****************************/
42/* 41/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 7727f0966d31..67da31295781 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -54,100 +54,133 @@
54#define IWL_PASSIVE_DWELL_BASE (100) 54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5 55#define IWL_CHANNEL_TUNE_TIME 5
56 56
57static int iwl_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
57 65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
58 75
59/** 76 ret = iwl_send_cmd_sync(priv, &cmd);
60 * iwl_scan_cancel - Cancel any currently executing HW scan 77 if (ret)
61 * 78 return ret;
62 * NOTE: priv->mutex is not required before calling this function 79
63 */ 80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
64int iwl_scan_cancel(struct iwl_priv *priv) 81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
84 * due to simply not being in an active scan which
85 * can occur if we send the scan abort before we
86 * the microcode has notified us that a scan is
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_free_pages(priv, cmd.reply_page);
93 return ret;
94}
95
96static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
65{ 97{
66 if (!test_bit(STATUS_SCAN_HW, &priv->status)) { 98 /* check if scan was requested from mac80211 */
67 clear_bit(STATUS_SCANNING, &priv->status); 99 if (priv->scan_request) {
68 return 0; 100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
69 } 102 }
70 103
71 if (test_bit(STATUS_SCANNING, &priv->status)) { 104 priv->is_internal_short_scan = false;
72 if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { 105 priv->scan_vif = NULL;
73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n"); 106 priv->scan_request = NULL;
74 queue_work(priv->workqueue, &priv->abort_scan); 107}
108
109void iwl_force_scan_end(struct iwl_priv *priv)
110{
111 lockdep_assert_held(&priv->mutex);
112
113 if (!test_bit(STATUS_SCANNING, &priv->status)) {
114 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
115 return;
116 }
117
118 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
119 clear_bit(STATUS_SCANNING, &priv->status);
120 clear_bit(STATUS_SCAN_HW, &priv->status);
121 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
122 iwl_complete_scan(priv, true);
123}
75 124
76 } else 125static void iwl_do_scan_abort(struct iwl_priv *priv)
77 IWL_DEBUG_SCAN(priv, "Scan abort already in progress.\n"); 126{
127 int ret;
128
129 lockdep_assert_held(&priv->mutex);
130
131 if (!test_bit(STATUS_SCANNING, &priv->status)) {
132 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
133 return;
134 }
78 135
79 return test_bit(STATUS_SCANNING, &priv->status); 136 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
137 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
138 return;
80 } 139 }
81 140
141 ret = iwl_send_scan_abort(priv);
142 if (ret) {
143 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
144 iwl_force_scan_end(priv);
145 } else
146 IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
147}
148
149/**
150 * iwl_scan_cancel - Cancel any currently executing HW scan
151 */
152int iwl_scan_cancel(struct iwl_priv *priv)
153{
154 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
155 queue_work(priv->workqueue, &priv->abort_scan);
82 return 0; 156 return 0;
83} 157}
84EXPORT_SYMBOL(iwl_scan_cancel); 158EXPORT_SYMBOL(iwl_scan_cancel);
159
85/** 160/**
86 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan 161 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
87 * @ms: amount of time to wait (in milliseconds) for scan to abort 162 * @ms: amount of time to wait (in milliseconds) for scan to abort
88 * 163 *
89 * NOTE: priv->mutex must be held before calling this function
90 */ 164 */
91int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) 165int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
92{ 166{
93 unsigned long now = jiffies; 167 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
94 int ret;
95 168
96 ret = iwl_scan_cancel(priv); 169 lockdep_assert_held(&priv->mutex);
97 if (ret && ms) {
98 mutex_unlock(&priv->mutex);
99 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
100 test_bit(STATUS_SCANNING, &priv->status))
101 msleep(1);
102 mutex_lock(&priv->mutex);
103
104 return test_bit(STATUS_SCANNING, &priv->status);
105 }
106
107 return ret;
108}
109EXPORT_SYMBOL(iwl_scan_cancel_timeout);
110 170
111static int iwl_send_scan_abort(struct iwl_priv *priv) 171 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
112{
113 int ret = 0;
114 struct iwl_rx_packet *pkt;
115 struct iwl_host_cmd cmd = {
116 .id = REPLY_SCAN_ABORT_CMD,
117 .flags = CMD_WANT_SKB,
118 };
119 172
120 /* If there isn't a scan actively going on in the hardware 173 iwl_do_scan_abort(priv);
121 * then we are in between scan bands and not actually
122 * actively scanning, so don't send the abort command */
123 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
124 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
125 return 0;
126 }
127 174
128 ret = iwl_send_cmd_sync(priv, &cmd); 175 while (time_before_eq(jiffies, timeout)) {
129 if (ret) { 176 if (!test_bit(STATUS_SCAN_HW, &priv->status))
130 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 177 break;
131 return ret; 178 msleep(20);
132 }
133
134 pkt = (struct iwl_rx_packet *)cmd.reply_page;
135 if (pkt->u.status != CAN_ABORT_STATUS) {
136 /* The scan abort will return 1 for success or
137 * 2 for "failure". A failure condition can be
138 * due to simply not being in an active scan which
139 * can occur if we send the scan abort before we
140 * the microcode has notified us that a scan is
141 * completed. */
142 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
143 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
144 clear_bit(STATUS_SCAN_HW, &priv->status);
145 } 179 }
146 180
147 iwl_free_pages(priv, cmd.reply_page); 181 return test_bit(STATUS_SCAN_HW, &priv->status);
148
149 return ret;
150} 182}
183EXPORT_SYMBOL(iwl_scan_cancel_timeout);
151 184
152/* Service response to REPLY_SCAN_CMD (0x80) */ 185/* Service response to REPLY_SCAN_CMD (0x80) */
153static void iwl_rx_reply_scan(struct iwl_priv *priv, 186static void iwl_rx_reply_scan(struct iwl_priv *priv,
@@ -158,7 +191,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
158 struct iwl_scanreq_notification *notif = 191 struct iwl_scanreq_notification *notif =
159 (struct iwl_scanreq_notification *)pkt->u.raw; 192 (struct iwl_scanreq_notification *)pkt->u.raw;
160 193
161 IWL_DEBUG_RX(priv, "Scan request status = 0x%x\n", notif->status); 194 IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
162#endif 195#endif
163} 196}
164 197
@@ -217,26 +250,17 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
217 /* The HW is no longer scanning */ 250 /* The HW is no longer scanning */
218 clear_bit(STATUS_SCAN_HW, &priv->status); 251 clear_bit(STATUS_SCAN_HW, &priv->status);
219 252
220 IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n", 253 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
221 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 254 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
222 jiffies_to_msecs(elapsed_jiffies 255 jiffies_to_msecs(elapsed_jiffies
223 (priv->scan_start, jiffies))); 256 (priv->scan_start, jiffies)));
224 257
225 /* 258 queue_work(priv->workqueue, &priv->scan_completed);
226 * If a request to abort was given, or the scan did not succeed
227 * then we reset the scan state machine and terminate,
228 * re-queuing another scan if one has been requested
229 */
230 if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
231 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
232
233 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
234
235 clear_bit(STATUS_SCANNING, &priv->status);
236 259
237 if (priv->iw_mode != NL80211_IFTYPE_ADHOC && 260 if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
238 priv->cfg->advanced_bt_coexist && priv->bt_status != 261 priv->cfg->bt_params &&
239 scan_notif->bt_status) { 262 priv->cfg->bt_params->advanced_bt_coexist &&
263 priv->bt_status != scan_notif->bt_status) {
240 if (scan_notif->bt_status) { 264 if (scan_notif->bt_status) {
241 /* BT on */ 265 /* BT on */
242 if (!priv->bt_ch_announce) 266 if (!priv->bt_ch_announce)
@@ -254,7 +278,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
254 priv->bt_status = scan_notif->bt_status; 278 priv->bt_status = scan_notif->bt_status;
255 queue_work(priv->workqueue, &priv->bt_traffic_change_work); 279 queue_work(priv->workqueue, &priv->bt_traffic_change_work);
256 } 280 }
257 queue_work(priv->workqueue, &priv->scan_completed);
258} 281}
259 282
260void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) 283void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -324,19 +347,53 @@ void iwl_init_scan_params(struct iwl_priv *priv)
324} 347}
325EXPORT_SYMBOL(iwl_init_scan_params); 348EXPORT_SYMBOL(iwl_init_scan_params);
326 349
327static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif) 350static int __must_check iwl_scan_initiate(struct iwl_priv *priv,
351 struct ieee80211_vif *vif,
352 bool internal,
353 enum ieee80211_band band)
328{ 354{
355 int ret;
356
329 lockdep_assert_held(&priv->mutex); 357 lockdep_assert_held(&priv->mutex);
330 358
331 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 359 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
360 return -EOPNOTSUPP;
361
362 cancel_delayed_work(&priv->scan_check);
363
364 if (!iwl_is_ready_rf(priv)) {
365 IWL_WARN(priv, "Request scan called when driver not ready.\n");
366 return -EIO;
367 }
368
369 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
370 IWL_DEBUG_SCAN(priv,
371 "Multiple concurrent scan requests in parallel.\n");
372 return -EBUSY;
373 }
374
375 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
376 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
377 return -EBUSY;
378 }
379
380 IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
381 internal ? "internal short " : "");
382
332 set_bit(STATUS_SCANNING, &priv->status); 383 set_bit(STATUS_SCANNING, &priv->status);
333 priv->is_internal_short_scan = false; 384 priv->is_internal_short_scan = internal;
334 priv->scan_start = jiffies; 385 priv->scan_start = jiffies;
386 priv->scan_band = band;
335 387
336 if (WARN_ON(!priv->cfg->ops->utils->request_scan)) 388 ret = priv->cfg->ops->utils->request_scan(priv, vif);
337 return -EOPNOTSUPP; 389 if (ret) {
390 clear_bit(STATUS_SCANNING, &priv->status);
391 priv->is_internal_short_scan = false;
392 return ret;
393 }
338 394
339 priv->cfg->ops->utils->request_scan(priv, vif); 395 queue_delayed_work(priv->workqueue, &priv->scan_check,
396 IWL_SCAN_CHECK_WATCHDOG);
340 397
341 return 0; 398 return 0;
342} 399}
@@ -355,12 +412,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
355 412
356 mutex_lock(&priv->mutex); 413 mutex_lock(&priv->mutex);
357 414
358 if (!iwl_is_ready_rf(priv)) {
359 ret = -EIO;
360 IWL_DEBUG_MAC80211(priv, "leave - not ready or exit pending\n");
361 goto out_unlock;
362 }
363
364 if (test_bit(STATUS_SCANNING, &priv->status) && 415 if (test_bit(STATUS_SCANNING, &priv->status) &&
365 !priv->is_internal_short_scan) { 416 !priv->is_internal_short_scan) {
366 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 417 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
@@ -368,14 +419,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
368 goto out_unlock; 419 goto out_unlock;
369 } 420 }
370 421
371 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
372 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
373 ret = -EAGAIN;
374 goto out_unlock;
375 }
376
377 /* mac80211 will only ask for one band at a time */ 422 /* mac80211 will only ask for one band at a time */
378 priv->scan_band = req->channels[0]->band;
379 priv->scan_request = req; 423 priv->scan_request = req;
380 priv->scan_vif = vif; 424 priv->scan_vif = vif;
381 425
@@ -383,10 +427,12 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
383 * If an internal scan is in progress, just set 427 * If an internal scan is in progress, just set
384 * up the scan_request as per above. 428 * up the scan_request as per above.
385 */ 429 */
386 if (priv->is_internal_short_scan) 430 if (priv->is_internal_short_scan) {
431 IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n");
387 ret = 0; 432 ret = 0;
388 else 433 } else
389 ret = iwl_scan_initiate(priv, vif); 434 ret = iwl_scan_initiate(priv, vif, false,
435 req->channels[0]->band);
390 436
391 IWL_DEBUG_MAC80211(priv, "leave\n"); 437 IWL_DEBUG_MAC80211(priv, "leave\n");
392 438
@@ -411,6 +457,8 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
411 struct iwl_priv *priv = 457 struct iwl_priv *priv =
412 container_of(work, struct iwl_priv, start_internal_scan); 458 container_of(work, struct iwl_priv, start_internal_scan);
413 459
460 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
461
414 mutex_lock(&priv->mutex); 462 mutex_lock(&priv->mutex);
415 463
416 if (priv->is_internal_short_scan == true) { 464 if (priv->is_internal_short_scan == true) {
@@ -418,31 +466,13 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
418 goto unlock; 466 goto unlock;
419 } 467 }
420 468
421 if (!iwl_is_ready_rf(priv)) {
422 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
423 goto unlock;
424 }
425
426 if (test_bit(STATUS_SCANNING, &priv->status)) { 469 if (test_bit(STATUS_SCANNING, &priv->status)) {
427 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 470 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
428 goto unlock; 471 goto unlock;
429 } 472 }
430 473
431 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 474 if (iwl_scan_initiate(priv, NULL, true, priv->band))
432 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); 475 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
433 goto unlock;
434 }
435
436 priv->scan_band = priv->band;
437
438 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
439 set_bit(STATUS_SCANNING, &priv->status);
440 priv->is_internal_short_scan = true;
441
442 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
443 goto unlock;
444
445 priv->cfg->ops->utils->request_scan(priv, NULL);
446 unlock: 476 unlock:
447 mutex_unlock(&priv->mutex); 477 mutex_unlock(&priv->mutex);
448} 478}
@@ -452,18 +482,13 @@ static void iwl_bg_scan_check(struct work_struct *data)
452 struct iwl_priv *priv = 482 struct iwl_priv *priv =
453 container_of(data, struct iwl_priv, scan_check.work); 483 container_of(data, struct iwl_priv, scan_check.work);
454 484
455 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 485 IWL_DEBUG_SCAN(priv, "Scan check work\n");
456 return;
457 486
487 /* Since we are here firmware does not finish scan and
488 * most likely is in bad shape, so we don't bother to
489 * send abort command, just force scan complete to mac80211 */
458 mutex_lock(&priv->mutex); 490 mutex_lock(&priv->mutex);
459 if (test_bit(STATUS_SCANNING, &priv->status) && 491 iwl_force_scan_end(priv);
460 !test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
461 IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n",
462 jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
463
464 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
465 iwl_send_scan_abort(priv);
466 }
467 mutex_unlock(&priv->mutex); 492 mutex_unlock(&priv->mutex);
468} 493}
469 494
@@ -519,15 +544,12 @@ static void iwl_bg_abort_scan(struct work_struct *work)
519{ 544{
520 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); 545 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
521 546
522 if (!test_bit(STATUS_READY, &priv->status) || 547 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
523 !test_bit(STATUS_GEO_CONFIGURED, &priv->status))
524 return;
525
526 cancel_delayed_work(&priv->scan_check);
527 548
549 /* We keep scan_check work queued in case when firmware will not
550 * report back scan completed notification */
528 mutex_lock(&priv->mutex); 551 mutex_lock(&priv->mutex);
529 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) 552 iwl_scan_cancel_timeout(priv, 200);
530 iwl_send_scan_abort(priv);
531 mutex_unlock(&priv->mutex); 553 mutex_unlock(&priv->mutex);
532} 554}
533 555
@@ -535,55 +557,60 @@ static void iwl_bg_scan_completed(struct work_struct *work)
535{ 557{
536 struct iwl_priv *priv = 558 struct iwl_priv *priv =
537 container_of(work, struct iwl_priv, scan_completed); 559 container_of(work, struct iwl_priv, scan_completed);
538 bool internal = false; 560 bool aborted;
539 bool scan_completed = false;
540 struct iwl_rxon_context *ctx;
541 561
542 IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); 562 IWL_DEBUG_SCAN(priv, "Completed %sscan.\n",
563 priv->is_internal_short_scan ? "internal short " : "");
543 564
544 cancel_delayed_work(&priv->scan_check); 565 cancel_delayed_work(&priv->scan_check);
545 566
546 mutex_lock(&priv->mutex); 567 mutex_lock(&priv->mutex);
547 if (priv->is_internal_short_scan) { 568
548 priv->is_internal_short_scan = false; 569 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
549 IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); 570 if (aborted)
550 internal = true; 571 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
551 } else if (priv->scan_request) { 572
552 scan_completed = true; 573 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
553 priv->scan_request = NULL; 574 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
554 priv->scan_vif = NULL; 575 goto out_settings;
555 } 576 }
556 577
557 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 578 if (priv->is_internal_short_scan && !aborted) {
579 int err;
580
581 /* Check if mac80211 requested scan during our internal scan */
582 if (priv->scan_request == NULL)
583 goto out_complete;
584
585 /* If so request a new scan */
586 err = iwl_scan_initiate(priv, priv->scan_vif, false,
587 priv->scan_request->channels[0]->band);
588 if (err) {
589 IWL_DEBUG_SCAN(priv,
590 "failed to initiate pending scan: %d\n", err);
591 aborted = true;
592 goto out_complete;
593 }
594
558 goto out; 595 goto out;
596 }
559 597
560 if (internal && priv->scan_request) 598out_complete:
561 iwl_scan_initiate(priv, priv->scan_vif); 599 iwl_complete_scan(priv, aborted);
600
601out_settings:
602 /* Can we still talk to firmware ? */
603 if (!iwl_is_ready_rf(priv))
604 goto out;
562 605
563 /* Since setting the TXPOWER may have been deferred while 606 /* Since setting the TXPOWER may have been deferred while
564 * performing the scan, fire one off */ 607 * performing the scan, fire one off */
565 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 608 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
566 609
567 /* 610 priv->cfg->ops->utils->post_scan(priv);
568 * Since setting the RXON may have been deferred while
569 * performing the scan, fire one off if needed
570 */
571 for_each_context(priv, ctx)
572 iwlcore_commit_rxon(priv, ctx);
573 611
574 out: 612 out:
575 if (priv->cfg->ops->hcmd->set_pan_params)
576 priv->cfg->ops->hcmd->set_pan_params(priv);
577
578 mutex_unlock(&priv->mutex); 613 mutex_unlock(&priv->mutex);
579
580 /*
581 * Do not hold mutex here since this will cause mac80211 to call
582 * into driver again into functions that will attempt to take
583 * mutex.
584 */
585 if (scan_completed)
586 ieee80211_scan_completed(priv->hw, false);
587} 614}
588 615
589void iwl_setup_scan_deferred_work(struct iwl_priv *priv) 616void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
@@ -595,3 +622,16 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
595} 622}
596EXPORT_SYMBOL(iwl_setup_scan_deferred_work); 623EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
597 624
625void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
626{
627 cancel_work_sync(&priv->start_internal_scan);
628 cancel_work_sync(&priv->abort_scan);
629 cancel_work_sync(&priv->scan_completed);
630
631 if (cancel_delayed_work_sync(&priv->scan_check)) {
632 mutex_lock(&priv->mutex);
633 iwl_force_scan_end(priv);
634 mutex_unlock(&priv->mutex);
635 }
636}
637EXPORT_SYMBOL(iwl_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index ccd09027c7cd..7c7f7dcb1b1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -228,9 +228,8 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
228 * 228 *
229 * should be called with sta_lock held 229 * should be called with sta_lock held
230 */ 230 */
231static u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 231u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
232 const u8 *addr, bool is_ap, 232 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
233 struct ieee80211_sta *sta)
234{ 233{
235 struct iwl_station_entry *station; 234 struct iwl_station_entry *station;
236 int i; 235 int i;
@@ -317,6 +316,7 @@ static u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
317 return sta_id; 316 return sta_id;
318 317
319} 318}
319EXPORT_SYMBOL_GPL(iwl_prep_station);
320 320
321#define STA_WAIT_TIMEOUT (HZ/2) 321#define STA_WAIT_TIMEOUT (HZ/2)
322 322
@@ -381,111 +381,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
381} 381}
382EXPORT_SYMBOL(iwl_add_station_common); 382EXPORT_SYMBOL(iwl_add_station_common);
383 383
384static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
385 u8 sta_id)
386{
387 int i, r;
388 struct iwl_link_quality_cmd *link_cmd;
389 u32 rate_flags;
390
391 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
392 if (!link_cmd) {
393 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
394 return NULL;
395 }
396 /* Set up the rate scaling to start at selected rate, fall back
397 * all the way down to 1M in IEEE order, and then spin on 1M */
398 if (priv->band == IEEE80211_BAND_5GHZ)
399 r = IWL_RATE_6M_INDEX;
400 else
401 r = IWL_RATE_1M_INDEX;
402
403 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
404 rate_flags = 0;
405 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
406 rate_flags |= RATE_MCS_CCK_MSK;
407
408 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
409 RATE_MCS_ANT_POS;
410
411 link_cmd->rs_table[i].rate_n_flags =
412 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
413 r = iwl_get_prev_ieee_rate(r);
414 }
415
416 link_cmd->general_params.single_stream_ant_msk =
417 first_antenna(priv->hw_params.valid_tx_ant);
418
419 link_cmd->general_params.dual_stream_ant_msk =
420 priv->hw_params.valid_tx_ant &
421 ~first_antenna(priv->hw_params.valid_tx_ant);
422 if (!link_cmd->general_params.dual_stream_ant_msk) {
423 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
424 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
425 link_cmd->general_params.dual_stream_ant_msk =
426 priv->hw_params.valid_tx_ant;
427 }
428
429 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
430 link_cmd->agg_params.agg_time_limit =
431 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
432
433 link_cmd->sta_id = sta_id;
434
435 return link_cmd;
436}
437
438/*
439 * iwl_add_bssid_station - Add the special IBSS BSSID station
440 *
441 * Function sleeps.
442 */
443int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
444 const u8 *addr, bool init_rs, u8 *sta_id_r)
445{
446 int ret;
447 u8 sta_id;
448 struct iwl_link_quality_cmd *link_cmd;
449 unsigned long flags;
450
451 if (sta_id_r)
452 *sta_id_r = IWL_INVALID_STATION;
453
454 ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
455 if (ret) {
456 IWL_ERR(priv, "Unable to add station %pM\n", addr);
457 return ret;
458 }
459
460 if (sta_id_r)
461 *sta_id_r = sta_id;
462
463 spin_lock_irqsave(&priv->sta_lock, flags);
464 priv->stations[sta_id].used |= IWL_STA_LOCAL;
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466
467 if (init_rs) {
468 /* Set up default rate scaling table in device's station table */
469 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
470 if (!link_cmd) {
471 IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
472 addr);
473 return -ENOMEM;
474 }
475
476 ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
477 if (ret)
478 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
479
480 spin_lock_irqsave(&priv->sta_lock, flags);
481 priv->stations[sta_id].lq = link_cmd;
482 spin_unlock_irqrestore(&priv->sta_lock, flags);
483 }
484
485 return 0;
486}
487EXPORT_SYMBOL(iwl_add_bssid_station);
488
489/** 384/**
490 * iwl_sta_ucode_deactivate - deactivate ucode status for a station 385 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
491 * 386 *
@@ -741,405 +636,25 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
741} 636}
742EXPORT_SYMBOL(iwl_get_free_ucode_key_index); 637EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
743 638
744static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, 639void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
745 struct iwl_rxon_context *ctx,
746 bool send_if_empty)
747{
748 int i, not_empty = 0;
749 u8 buff[sizeof(struct iwl_wep_cmd) +
750 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
751 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
752 size_t cmd_size = sizeof(struct iwl_wep_cmd);
753 struct iwl_host_cmd cmd = {
754 .id = ctx->wep_key_cmd,
755 .data = wep_cmd,
756 .flags = CMD_SYNC,
757 };
758
759 might_sleep();
760
761 memset(wep_cmd, 0, cmd_size +
762 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
763
764 for (i = 0; i < WEP_KEYS_MAX ; i++) {
765 wep_cmd->key[i].key_index = i;
766 if (ctx->wep_keys[i].key_size) {
767 wep_cmd->key[i].key_offset = i;
768 not_empty = 1;
769 } else {
770 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
771 }
772
773 wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
774 memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
775 ctx->wep_keys[i].key_size);
776 }
777
778 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
779 wep_cmd->num_keys = WEP_KEYS_MAX;
780
781 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
782
783 cmd.len = cmd_size;
784
785 if (not_empty || send_if_empty)
786 return iwl_send_cmd(priv, &cmd);
787 else
788 return 0;
789}
790
791int iwl_restore_default_wep_keys(struct iwl_priv *priv,
792 struct iwl_rxon_context *ctx)
793{
794 lockdep_assert_held(&priv->mutex);
795
796 return iwl_send_static_wepkey_cmd(priv, ctx, false);
797}
798EXPORT_SYMBOL(iwl_restore_default_wep_keys);
799
800int iwl_remove_default_wep_key(struct iwl_priv *priv,
801 struct iwl_rxon_context *ctx,
802 struct ieee80211_key_conf *keyconf)
803{
804 int ret;
805
806 lockdep_assert_held(&priv->mutex);
807
808 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
809 keyconf->keyidx);
810
811 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
812 if (iwl_is_rfkill(priv)) {
813 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
814 /* but keys in device are clear anyway so return success */
815 return 0;
816 }
817 ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
818 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
819 keyconf->keyidx, ret);
820
821 return ret;
822}
823EXPORT_SYMBOL(iwl_remove_default_wep_key);
824
825int iwl_set_default_wep_key(struct iwl_priv *priv,
826 struct iwl_rxon_context *ctx,
827 struct ieee80211_key_conf *keyconf)
828{
829 int ret;
830
831 lockdep_assert_held(&priv->mutex);
832
833 if (keyconf->keylen != WEP_KEY_LEN_128 &&
834 keyconf->keylen != WEP_KEY_LEN_64) {
835 IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
836 return -EINVAL;
837 }
838
839 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
840 keyconf->hw_key_idx = HW_KEY_DEFAULT;
841 priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
842
843 ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
844 memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
845 keyconf->keylen);
846
847 ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
848 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
849 keyconf->keylen, keyconf->keyidx, ret);
850
851 return ret;
852}
853EXPORT_SYMBOL(iwl_set_default_wep_key);
854
855static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
856 struct iwl_rxon_context *ctx,
857 struct ieee80211_key_conf *keyconf,
858 u8 sta_id)
859{
860 unsigned long flags;
861 __le16 key_flags = 0;
862 struct iwl_addsta_cmd sta_cmd;
863
864 lockdep_assert_held(&priv->mutex);
865
866 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
867
868 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
869 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
870 key_flags &= ~STA_KEY_FLG_INVALID;
871
872 if (keyconf->keylen == WEP_KEY_LEN_128)
873 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
874
875 if (sta_id == ctx->bcast_sta_id)
876 key_flags |= STA_KEY_MULTICAST_MSK;
877
878 spin_lock_irqsave(&priv->sta_lock, flags);
879
880 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
881 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
882 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
883
884 memcpy(priv->stations[sta_id].keyinfo.key,
885 keyconf->key, keyconf->keylen);
886
887 memcpy(&priv->stations[sta_id].sta.key.key[3],
888 keyconf->key, keyconf->keylen);
889
890 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
891 == STA_KEY_FLG_NO_ENC)
892 priv->stations[sta_id].sta.key.key_offset =
893 iwl_get_free_ucode_key_index(priv);
894 /* else, we are overriding an existing key => no need to allocated room
895 * in uCode. */
896
897 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
898 "no space for a new key");
899
900 priv->stations[sta_id].sta.key.key_flags = key_flags;
901 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
902 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
903
904 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
905 spin_unlock_irqrestore(&priv->sta_lock, flags);
906
907 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
908}
909
910static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
911 struct iwl_rxon_context *ctx,
912 struct ieee80211_key_conf *keyconf,
913 u8 sta_id)
914{
915 unsigned long flags;
916 __le16 key_flags = 0;
917 struct iwl_addsta_cmd sta_cmd;
918
919 lockdep_assert_held(&priv->mutex);
920
921 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
922 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
923 key_flags &= ~STA_KEY_FLG_INVALID;
924
925 if (sta_id == ctx->bcast_sta_id)
926 key_flags |= STA_KEY_MULTICAST_MSK;
927
928 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
929
930 spin_lock_irqsave(&priv->sta_lock, flags);
931 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
932 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
933
934 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
935 keyconf->keylen);
936
937 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
938 keyconf->keylen);
939
940 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
941 == STA_KEY_FLG_NO_ENC)
942 priv->stations[sta_id].sta.key.key_offset =
943 iwl_get_free_ucode_key_index(priv);
944 /* else, we are overriding an existing key => no need to allocated room
945 * in uCode. */
946
947 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
948 "no space for a new key");
949
950 priv->stations[sta_id].sta.key.key_flags = key_flags;
951 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
952 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
953
954 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
955 spin_unlock_irqrestore(&priv->sta_lock, flags);
956
957 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
958}
959
960static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
961 struct iwl_rxon_context *ctx,
962 struct ieee80211_key_conf *keyconf,
963 u8 sta_id)
964{
965 unsigned long flags;
966 int ret = 0;
967 __le16 key_flags = 0;
968
969 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
970 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
971 key_flags &= ~STA_KEY_FLG_INVALID;
972
973 if (sta_id == ctx->bcast_sta_id)
974 key_flags |= STA_KEY_MULTICAST_MSK;
975
976 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
977 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
978
979 spin_lock_irqsave(&priv->sta_lock, flags);
980
981 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
982 priv->stations[sta_id].keyinfo.keylen = 16;
983
984 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
985 == STA_KEY_FLG_NO_ENC)
986 priv->stations[sta_id].sta.key.key_offset =
987 iwl_get_free_ucode_key_index(priv);
988 /* else, we are overriding an existing key => no need to allocated room
989 * in uCode. */
990
991 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
992 "no space for a new key");
993
994 priv->stations[sta_id].sta.key.key_flags = key_flags;
995
996
997 /* This copy is acutally not needed: we get the key with each TX */
998 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
999
1000 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
1001
1002 spin_unlock_irqrestore(&priv->sta_lock, flags);
1003
1004 return ret;
1005}
1006
1007void iwl_update_tkip_key(struct iwl_priv *priv,
1008 struct iwl_rxon_context *ctx,
1009 struct ieee80211_key_conf *keyconf,
1010 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
1011{ 640{
1012 u8 sta_id;
1013 unsigned long flags; 641 unsigned long flags;
1014 int i; 642 int i;
1015 643
1016 if (iwl_scan_cancel(priv)) {
1017 /* cancel scan failed, just live w/ bad key and rely
1018 briefly on SW decryption */
1019 return;
1020 }
1021
1022 sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
1023 if (sta_id == IWL_INVALID_STATION)
1024 return;
1025
1026 spin_lock_irqsave(&priv->sta_lock, flags); 644 spin_lock_irqsave(&priv->sta_lock, flags);
645 for (i = 0; i < priv->hw_params.max_stations; i++) {
646 if (!(priv->stations[i].used & IWL_STA_BCAST))
647 continue;
1027 648
1028 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; 649 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
1029 650 priv->num_stations--;
1030 for (i = 0; i < 5; i++) 651 BUG_ON(priv->num_stations < 0);
1031 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = 652 kfree(priv->stations[i].lq);
1032 cpu_to_le16(phase1key[i]); 653 priv->stations[i].lq = NULL;
1033
1034 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1035 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1036
1037 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1038
1039 spin_unlock_irqrestore(&priv->sta_lock, flags);
1040
1041}
1042EXPORT_SYMBOL(iwl_update_tkip_key);
1043
1044int iwl_remove_dynamic_key(struct iwl_priv *priv,
1045 struct iwl_rxon_context *ctx,
1046 struct ieee80211_key_conf *keyconf,
1047 u8 sta_id)
1048{
1049 unsigned long flags;
1050 u16 key_flags;
1051 u8 keyidx;
1052 struct iwl_addsta_cmd sta_cmd;
1053
1054 lockdep_assert_held(&priv->mutex);
1055
1056 ctx->key_mapping_keys--;
1057
1058 spin_lock_irqsave(&priv->sta_lock, flags);
1059 key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
1060 keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
1061
1062 IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
1063 keyconf->keyidx, sta_id);
1064
1065 if (keyconf->keyidx != keyidx) {
1066 /* We need to remove a key with index different that the one
1067 * in the uCode. This means that the key we need to remove has
1068 * been replaced by another one with different index.
1069 * Don't do anything and return ok
1070 */
1071 spin_unlock_irqrestore(&priv->sta_lock, flags);
1072 return 0;
1073 }
1074
1075 if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
1076 IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
1077 keyconf->keyidx, key_flags);
1078 spin_unlock_irqrestore(&priv->sta_lock, flags);
1079 return 0;
1080 }
1081
1082 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
1083 &priv->ucode_key_table))
1084 IWL_ERR(priv, "index %d not used in uCode key table.\n",
1085 priv->stations[sta_id].sta.key.key_offset);
1086 memset(&priv->stations[sta_id].keyinfo, 0,
1087 sizeof(struct iwl_hw_key));
1088 memset(&priv->stations[sta_id].sta.key, 0,
1089 sizeof(struct iwl4965_keyinfo));
1090 priv->stations[sta_id].sta.key.key_flags =
1091 STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
1092 priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
1093 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1094 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1095
1096 if (iwl_is_rfkill(priv)) {
1097 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
1098 spin_unlock_irqrestore(&priv->sta_lock, flags);
1099 return 0;
1100 } 654 }
1101 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1102 spin_unlock_irqrestore(&priv->sta_lock, flags); 655 spin_unlock_irqrestore(&priv->sta_lock, flags);
1103
1104 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1105}
1106EXPORT_SYMBOL(iwl_remove_dynamic_key);
1107
1108int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1109 struct ieee80211_key_conf *keyconf, u8 sta_id)
1110{
1111 int ret;
1112
1113 lockdep_assert_held(&priv->mutex);
1114
1115 ctx->key_mapping_keys++;
1116 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
1117
1118 switch (keyconf->cipher) {
1119 case WLAN_CIPHER_SUITE_CCMP:
1120 ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
1121 break;
1122 case WLAN_CIPHER_SUITE_TKIP:
1123 ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
1124 break;
1125 case WLAN_CIPHER_SUITE_WEP40:
1126 case WLAN_CIPHER_SUITE_WEP104:
1127 ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
1128 break;
1129 default:
1130 IWL_ERR(priv,
1131 "Unknown alg: %s cipher = %x\n", __func__,
1132 keyconf->cipher);
1133 ret = -EINVAL;
1134 }
1135
1136 IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
1137 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1138 sta_id, ret);
1139
1140 return ret;
1141} 656}
1142EXPORT_SYMBOL(iwl_set_dynamic_key); 657EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
1143 658
1144#ifdef CONFIG_IWLWIFI_DEBUG 659#ifdef CONFIG_IWLWIFI_DEBUG
1145static void iwl_dump_lq_cmd(struct iwl_priv *priv, 660static void iwl_dump_lq_cmd(struct iwl_priv *priv,
@@ -1243,223 +758,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1243} 758}
1244EXPORT_SYMBOL(iwl_send_lq_cmd); 759EXPORT_SYMBOL(iwl_send_lq_cmd);
1245 760
1246/**
1247 * iwl_alloc_bcast_station - add broadcast station into driver's station table.
1248 *
1249 * This adds the broadcast station into the driver's station table
1250 * and marks it driver active, so that it will be restored to the
1251 * device at the next best time.
1252 */
1253int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1254 bool init_lq)
1255{
1256 struct iwl_link_quality_cmd *link_cmd;
1257 unsigned long flags;
1258 u8 sta_id;
1259
1260 spin_lock_irqsave(&priv->sta_lock, flags);
1261 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
1262 if (sta_id == IWL_INVALID_STATION) {
1263 IWL_ERR(priv, "Unable to prepare broadcast station\n");
1264 spin_unlock_irqrestore(&priv->sta_lock, flags);
1265
1266 return -EINVAL;
1267 }
1268
1269 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
1270 priv->stations[sta_id].used |= IWL_STA_BCAST;
1271 spin_unlock_irqrestore(&priv->sta_lock, flags);
1272
1273 if (init_lq) {
1274 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
1275 if (!link_cmd) {
1276 IWL_ERR(priv,
1277 "Unable to initialize rate scaling for bcast station.\n");
1278 return -ENOMEM;
1279 }
1280
1281 spin_lock_irqsave(&priv->sta_lock, flags);
1282 priv->stations[sta_id].lq = link_cmd;
1283 spin_unlock_irqrestore(&priv->sta_lock, flags);
1284 }
1285
1286 return 0;
1287}
1288EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
1289
1290/**
1291 * iwl_update_bcast_station - update broadcast station's LQ command
1292 *
1293 * Only used by iwlagn. Placed here to have all bcast station management
1294 * code together.
1295 */
1296static int iwl_update_bcast_station(struct iwl_priv *priv,
1297 struct iwl_rxon_context *ctx)
1298{
1299 unsigned long flags;
1300 struct iwl_link_quality_cmd *link_cmd;
1301 u8 sta_id = ctx->bcast_sta_id;
1302
1303 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
1304 if (!link_cmd) {
1305 IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
1306 return -ENOMEM;
1307 }
1308
1309 spin_lock_irqsave(&priv->sta_lock, flags);
1310 if (priv->stations[sta_id].lq)
1311 kfree(priv->stations[sta_id].lq);
1312 else
1313 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
1314 priv->stations[sta_id].lq = link_cmd;
1315 spin_unlock_irqrestore(&priv->sta_lock, flags);
1316
1317 return 0;
1318}
1319
1320int iwl_update_bcast_stations(struct iwl_priv *priv)
1321{
1322 struct iwl_rxon_context *ctx;
1323 int ret = 0;
1324
1325 for_each_context(priv, ctx) {
1326 ret = iwl_update_bcast_station(priv, ctx);
1327 if (ret)
1328 break;
1329 }
1330
1331 return ret;
1332}
1333EXPORT_SYMBOL_GPL(iwl_update_bcast_stations);
1334
1335void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
1336{
1337 unsigned long flags;
1338 int i;
1339
1340 spin_lock_irqsave(&priv->sta_lock, flags);
1341 for (i = 0; i < priv->hw_params.max_stations; i++) {
1342 if (!(priv->stations[i].used & IWL_STA_BCAST))
1343 continue;
1344
1345 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
1346 priv->num_stations--;
1347 BUG_ON(priv->num_stations < 0);
1348 kfree(priv->stations[i].lq);
1349 priv->stations[i].lq = NULL;
1350 }
1351 spin_unlock_irqrestore(&priv->sta_lock, flags);
1352}
1353EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
1354
1355/**
1356 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
1357 */
1358int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1359{
1360 unsigned long flags;
1361 struct iwl_addsta_cmd sta_cmd;
1362
1363 lockdep_assert_held(&priv->mutex);
1364
1365 /* Remove "disable" flag, to enable Tx for this TID */
1366 spin_lock_irqsave(&priv->sta_lock, flags);
1367 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1368 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
1369 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1370 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1371 spin_unlock_irqrestore(&priv->sta_lock, flags);
1372
1373 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1374}
1375EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
1376
1377int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1378 int tid, u16 ssn)
1379{
1380 unsigned long flags;
1381 int sta_id;
1382 struct iwl_addsta_cmd sta_cmd;
1383
1384 lockdep_assert_held(&priv->mutex);
1385
1386 sta_id = iwl_sta_id(sta);
1387 if (sta_id == IWL_INVALID_STATION)
1388 return -ENXIO;
1389
1390 spin_lock_irqsave(&priv->sta_lock, flags);
1391 priv->stations[sta_id].sta.station_flags_msk = 0;
1392 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
1393 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
1394 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
1395 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1396 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1397 spin_unlock_irqrestore(&priv->sta_lock, flags);
1398
1399 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1400}
1401EXPORT_SYMBOL(iwl_sta_rx_agg_start);
1402
1403int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1404 int tid)
1405{
1406 unsigned long flags;
1407 int sta_id;
1408 struct iwl_addsta_cmd sta_cmd;
1409
1410 lockdep_assert_held(&priv->mutex);
1411
1412 sta_id = iwl_sta_id(sta);
1413 if (sta_id == IWL_INVALID_STATION) {
1414 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1415 return -ENXIO;
1416 }
1417
1418 spin_lock_irqsave(&priv->sta_lock, flags);
1419 priv->stations[sta_id].sta.station_flags_msk = 0;
1420 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
1421 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
1422 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1423 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
1424 spin_unlock_irqrestore(&priv->sta_lock, flags);
1425
1426 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
1427}
1428EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
1429
1430void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1431{
1432 unsigned long flags;
1433
1434 spin_lock_irqsave(&priv->sta_lock, flags);
1435 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
1436 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1437 priv->stations[sta_id].sta.sta.modify_mask = 0;
1438 priv->stations[sta_id].sta.sleep_tx_count = 0;
1439 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1440 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1441 spin_unlock_irqrestore(&priv->sta_lock, flags);
1442
1443}
1444EXPORT_SYMBOL(iwl_sta_modify_ps_wake);
1445
1446void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
1447{
1448 unsigned long flags;
1449
1450 spin_lock_irqsave(&priv->sta_lock, flags);
1451 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
1452 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1453 priv->stations[sta_id].sta.sta.modify_mask =
1454 STA_MODIFY_SLEEP_TX_COUNT_MSK;
1455 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
1456 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1457 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1458 spin_unlock_irqrestore(&priv->sta_lock, flags);
1459
1460}
1461EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
1462
1463int iwl_mac_sta_remove(struct ieee80211_hw *hw, 761int iwl_mac_sta_remove(struct ieee80211_hw *hw,
1464 struct ieee80211_vif *vif, 762 struct ieee80211_vif *vif,
1465 struct ieee80211_sta *sta) 763 struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 56bad3f60d81..06475872eee4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -43,35 +43,13 @@
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ 43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44 44
45 45
46int iwl_remove_default_wep_key(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx,
48 struct ieee80211_key_conf *key);
49int iwl_set_default_wep_key(struct iwl_priv *priv,
50 struct iwl_rxon_context *ctx,
51 struct ieee80211_key_conf *key);
52int iwl_restore_default_wep_keys(struct iwl_priv *priv,
53 struct iwl_rxon_context *ctx);
54int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
55 struct ieee80211_key_conf *key, u8 sta_id);
56int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
57 struct ieee80211_key_conf *key, u8 sta_id);
58void iwl_update_tkip_key(struct iwl_priv *priv,
59 struct iwl_rxon_context *ctx,
60 struct ieee80211_key_conf *keyconf,
61 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
62
63void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 46void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
64void iwl_clear_ucode_stations(struct iwl_priv *priv, 47void iwl_clear_ucode_stations(struct iwl_priv *priv,
65 struct iwl_rxon_context *ctx); 48 struct iwl_rxon_context *ctx);
66int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
67 bool init_lq);
68void iwl_dealloc_bcast_stations(struct iwl_priv *priv); 49void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
69int iwl_update_bcast_stations(struct iwl_priv *priv);
70int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 50int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
71int iwl_send_add_sta(struct iwl_priv *priv, 51int iwl_send_add_sta(struct iwl_priv *priv,
72 struct iwl_addsta_cmd *sta, u8 flags); 52 struct iwl_addsta_cmd *sta, u8 flags);
73int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
74 const u8 *addr, bool init_rs, u8 *sta_id_r);
75int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 53int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
76 const u8 *addr, bool is_ap, 54 const u8 *addr, bool is_ap,
77 struct ieee80211_sta *sta, u8 *sta_id_r); 55 struct ieee80211_sta *sta, u8 *sta_id_r);
@@ -79,13 +57,12 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
79 const u8 *addr); 57 const u8 *addr);
80int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 58int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
81 struct ieee80211_sta *sta); 59 struct ieee80211_sta *sta);
82int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); 60
83int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, 61u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
84 int tid, u16 ssn); 62 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
85int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, 63
86 int tid); 64int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
87void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id); 65 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
88void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
89 66
90/** 67/**
91 * iwl_clear_driver_stations - clear knowledge of all stations from driver 68 * iwl_clear_driver_stations - clear knowledge of all stations from driver
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 347d3dc6a015..7261ee49f282 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -636,41 +636,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
636 meta->flags = 0; 636 meta->flags = 0;
637} 637}
638EXPORT_SYMBOL(iwl_tx_cmd_complete); 638EXPORT_SYMBOL(iwl_tx_cmd_complete);
639
640#ifdef CONFIG_IWLWIFI_DEBUG
641#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
642#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
643
644const char *iwl_get_tx_fail_reason(u32 status)
645{
646 switch (status & TX_STATUS_MSK) {
647 case TX_STATUS_SUCCESS:
648 return "SUCCESS";
649 TX_STATUS_POSTPONE(DELAY);
650 TX_STATUS_POSTPONE(FEW_BYTES);
651 TX_STATUS_POSTPONE(BT_PRIO);
652 TX_STATUS_POSTPONE(QUIET_PERIOD);
653 TX_STATUS_POSTPONE(CALC_TTAK);
654 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
655 TX_STATUS_FAIL(SHORT_LIMIT);
656 TX_STATUS_FAIL(LONG_LIMIT);
657 TX_STATUS_FAIL(FIFO_UNDERRUN);
658 TX_STATUS_FAIL(DRAIN_FLOW);
659 TX_STATUS_FAIL(RFKILL_FLUSH);
660 TX_STATUS_FAIL(LIFE_EXPIRE);
661 TX_STATUS_FAIL(DEST_PS);
662 TX_STATUS_FAIL(HOST_ABORTED);
663 TX_STATUS_FAIL(BT_RETRY);
664 TX_STATUS_FAIL(STA_INVALID);
665 TX_STATUS_FAIL(FRAG_DROPPED);
666 TX_STATUS_FAIL(TID_DISABLE);
667 TX_STATUS_FAIL(FIFO_FLUSHED);
668 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
669 TX_STATUS_FAIL(FW_DROP);
670 TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
671 }
672
673 return "UNKNOWN";
674}
675EXPORT_SYMBOL(iwl_get_tx_fail_reason);
676#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 68e624afb987..8f8c4b73f8b9 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -317,15 +317,15 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
317 int left) 317 int left)
318{ 318{
319 319
320 if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->ibss_beacon) 320 if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
321 return 0; 321 return 0;
322 322
323 if (priv->ibss_beacon->len > left) 323 if (priv->beacon_skb->len > left)
324 return 0; 324 return 0;
325 325
326 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); 326 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
327 327
328 return priv->ibss_beacon->len; 328 return priv->beacon_skb->len;
329} 329}
330 330
331static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) 331static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
@@ -813,10 +813,10 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
813 813
814 mutex_lock(&priv->mutex); 814 mutex_lock(&priv->mutex);
815 /* new beacon skb is allocated every time; dispose previous.*/ 815 /* new beacon skb is allocated every time; dispose previous.*/
816 if (priv->ibss_beacon) 816 if (priv->beacon_skb)
817 dev_kfree_skb(priv->ibss_beacon); 817 dev_kfree_skb(priv->beacon_skb);
818 818
819 priv->ibss_beacon = beacon; 819 priv->beacon_skb = beacon;
820 mutex_unlock(&priv->mutex); 820 mutex_unlock(&priv->mutex);
821 821
822 iwl3945_send_beacon_cmd(priv); 822 iwl3945_send_beacon_cmd(priv);
@@ -1581,16 +1581,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1581 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1581 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1582 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1582 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1583 1583
1584 if (capacity > priv->cfg->max_event_log_size) { 1584 if (capacity > priv->cfg->base_params->max_event_log_size) {
1585 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1585 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1586 capacity, priv->cfg->max_event_log_size); 1586 capacity, priv->cfg->base_params->max_event_log_size);
1587 capacity = priv->cfg->max_event_log_size; 1587 capacity = priv->cfg->base_params->max_event_log_size;
1588 } 1588 }
1589 1589
1590 if (next_entry > priv->cfg->max_event_log_size) { 1590 if (next_entry > priv->cfg->base_params->max_event_log_size) {
1591 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 1591 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1592 next_entry, priv->cfg->max_event_log_size); 1592 next_entry, priv->cfg->base_params->max_event_log_size);
1593 next_entry = priv->cfg->max_event_log_size; 1593 next_entry = priv->cfg->base_params->max_event_log_size;
1594 } 1594 }
1595 1595
1596 size = num_wraps ? capacity : next_entry; 1596 size = num_wraps ? capacity : next_entry;
@@ -1730,7 +1730,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1730 IWL_ERR(priv, "Microcode SW error detected. " 1730 IWL_ERR(priv, "Microcode SW error detected. "
1731 "Restarting 0x%X.\n", inta); 1731 "Restarting 0x%X.\n", inta);
1732 priv->isr_stats.sw++; 1732 priv->isr_stats.sw++;
1733 priv->isr_stats.sw_err = inta;
1734 iwl_irq_handle_error(priv); 1733 iwl_irq_handle_error(priv);
1735 handled |= CSR_INT_BIT_SW_ERR; 1734 handled |= CSR_INT_BIT_SW_ERR;
1736 } 1735 }
@@ -2520,7 +2519,8 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2520 /* Enable timer to monitor the driver queues */ 2519 /* Enable timer to monitor the driver queues */
2521 mod_timer(&priv->monitor_recover, 2520 mod_timer(&priv->monitor_recover,
2522 jiffies + 2521 jiffies +
2523 msecs_to_jiffies(priv->cfg->monitor_recover_period)); 2522 msecs_to_jiffies(
2523 priv->cfg->base_params->monitor_recover_period));
2524 } 2524 }
2525 2525
2526 if (iwl_is_rfkill(priv)) 2526 if (iwl_is_rfkill(priv))
@@ -2547,7 +2547,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2547 priv->cfg->ops->hcmd->send_bt_config(priv); 2547 priv->cfg->ops->hcmd->send_bt_config(priv);
2548 2548
2549 /* Configure the adapter for unassociated operation */ 2549 /* Configure the adapter for unassociated operation */
2550 iwlcore_commit_rxon(priv, ctx); 2550 iwl3945_commit_rxon(priv, ctx);
2551 2551
2552 iwl3945_reg_txpower_periodic(priv); 2552 iwl3945_reg_txpower_periodic(priv);
2553 2553
@@ -2568,15 +2568,13 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2568static void __iwl3945_down(struct iwl_priv *priv) 2568static void __iwl3945_down(struct iwl_priv *priv)
2569{ 2569{
2570 unsigned long flags; 2570 unsigned long flags;
2571 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 2571 int exit_pending;
2572 struct ieee80211_conf *conf = NULL;
2573 2572
2574 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); 2573 IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2575 2574
2576 conf = ieee80211_get_hw_conf(priv->hw); 2575 iwl_scan_cancel_timeout(priv, 200);
2577 2576
2578 if (!exit_pending) 2577 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2579 set_bit(STATUS_EXIT_PENDING, &priv->status);
2580 2578
2581 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set 2579 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
2582 * to prevent rearm timer */ 2580 * to prevent rearm timer */
@@ -2639,14 +2637,14 @@ static void __iwl3945_down(struct iwl_priv *priv)
2639 udelay(5); 2637 udelay(5);
2640 2638
2641 /* Stop the device, and put it in low power state */ 2639 /* Stop the device, and put it in low power state */
2642 priv->cfg->ops->lib->apm_ops.stop(priv); 2640 iwl_apm_stop(priv);
2643 2641
2644 exit: 2642 exit:
2645 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2643 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2646 2644
2647 if (priv->ibss_beacon) 2645 if (priv->beacon_skb)
2648 dev_kfree_skb(priv->ibss_beacon); 2646 dev_kfree_skb(priv->beacon_skb);
2649 priv->ibss_beacon = NULL; 2647 priv->beacon_skb = NULL;
2650 2648
2651 /* clear out any free frames */ 2649 /* clear out any free frames */
2652 iwl3945_clear_free_frames(priv); 2650 iwl3945_clear_free_frames(priv);
@@ -2663,12 +2661,33 @@ static void iwl3945_down(struct iwl_priv *priv)
2663 2661
2664#define MAX_HW_RESTARTS 5 2662#define MAX_HW_RESTARTS 5
2665 2663
2664static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2665{
2666 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2667 unsigned long flags;
2668 u8 sta_id;
2669
2670 spin_lock_irqsave(&priv->sta_lock, flags);
2671 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
2672 if (sta_id == IWL_INVALID_STATION) {
2673 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2674 spin_unlock_irqrestore(&priv->sta_lock, flags);
2675
2676 return -EINVAL;
2677 }
2678
2679 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2680 priv->stations[sta_id].used |= IWL_STA_BCAST;
2681 spin_unlock_irqrestore(&priv->sta_lock, flags);
2682
2683 return 0;
2684}
2685
2666static int __iwl3945_up(struct iwl_priv *priv) 2686static int __iwl3945_up(struct iwl_priv *priv)
2667{ 2687{
2668 int rc, i; 2688 int rc, i;
2669 2689
2670 rc = iwl_alloc_bcast_station(priv, &priv->contexts[IWL_RXON_CTX_BSS], 2690 rc = iwl3945_alloc_bcast_station(priv);
2671 false);
2672 if (rc) 2691 if (rc)
2673 return rc; 2692 return rc;
2674 2693
@@ -2820,7 +2839,7 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
2820 2839
2821} 2840}
2822 2841
2823void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) 2842int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2824{ 2843{
2825 struct iwl_host_cmd cmd = { 2844 struct iwl_host_cmd cmd = {
2826 .id = REPLY_SCAN_CMD, 2845 .id = REPLY_SCAN_CMD,
@@ -2828,61 +2847,19 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2828 .flags = CMD_SIZE_HUGE, 2847 .flags = CMD_SIZE_HUGE,
2829 }; 2848 };
2830 struct iwl3945_scan_cmd *scan; 2849 struct iwl3945_scan_cmd *scan;
2831 struct ieee80211_conf *conf = NULL;
2832 u8 n_probes = 0; 2850 u8 n_probes = 0;
2833 enum ieee80211_band band; 2851 enum ieee80211_band band;
2834 bool is_active = false; 2852 bool is_active = false;
2853 int ret;
2835 2854
2836 conf = ieee80211_get_hw_conf(priv->hw); 2855 lockdep_assert_held(&priv->mutex);
2837
2838 cancel_delayed_work(&priv->scan_check);
2839
2840 if (!iwl_is_ready(priv)) {
2841 IWL_WARN(priv, "request scan called when driver not ready.\n");
2842 goto done;
2843 }
2844
2845 /* Make sure the scan wasn't canceled before this queued work
2846 * was given the chance to run... */
2847 if (!test_bit(STATUS_SCANNING, &priv->status))
2848 goto done;
2849
2850 /* This should never be called or scheduled if there is currently
2851 * a scan active in the hardware. */
2852 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
2853 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
2854 "Ignoring second request.\n");
2855 goto done;
2856 }
2857
2858 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2859 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
2860 goto done;
2861 }
2862
2863 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2864 IWL_DEBUG_HC(priv,
2865 "Scan request while abort pending. Queuing.\n");
2866 goto done;
2867 }
2868
2869 if (iwl_is_rfkill(priv)) {
2870 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
2871 goto done;
2872 }
2873
2874 if (!test_bit(STATUS_READY, &priv->status)) {
2875 IWL_DEBUG_HC(priv,
2876 "Scan request while uninitialized. Queuing.\n");
2877 goto done;
2878 }
2879 2856
2880 if (!priv->scan_cmd) { 2857 if (!priv->scan_cmd) {
2881 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + 2858 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2882 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 2859 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2883 if (!priv->scan_cmd) { 2860 if (!priv->scan_cmd) {
2884 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); 2861 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
2885 goto done; 2862 return -ENOMEM;
2886 } 2863 }
2887 } 2864 }
2888 scan = priv->scan_cmd; 2865 scan = priv->scan_cmd;
@@ -2961,25 +2938,25 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2961 case IEEE80211_BAND_2GHZ: 2938 case IEEE80211_BAND_2GHZ:
2962 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2939 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2963 scan->tx_cmd.rate = IWL_RATE_1M_PLCP; 2940 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2964 scan->good_CRC_th = 0;
2965 band = IEEE80211_BAND_2GHZ; 2941 band = IEEE80211_BAND_2GHZ;
2966 break; 2942 break;
2967 case IEEE80211_BAND_5GHZ: 2943 case IEEE80211_BAND_5GHZ:
2968 scan->tx_cmd.rate = IWL_RATE_6M_PLCP; 2944 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2969 /*
2970 * If active scaning is requested but a certain channel
2971 * is marked passive, we can do active scanning if we
2972 * detect transmissions.
2973 */
2974 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2975 IWL_GOOD_CRC_TH_DISABLED;
2976 band = IEEE80211_BAND_5GHZ; 2945 band = IEEE80211_BAND_5GHZ;
2977 break; 2946 break;
2978 default: 2947 default:
2979 IWL_WARN(priv, "Invalid scan band\n"); 2948 IWL_WARN(priv, "Invalid scan band\n");
2980 goto done; 2949 return -EIO;
2981 } 2950 }
2982 2951
2952 /*
2953 * If active scaning is requested but a certain channel
2954 * is marked passive, we can do active scanning if we
2955 * detect transmissions.
2956 */
2957 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2958 IWL_GOOD_CRC_TH_DISABLED;
2959
2983 if (!priv->is_internal_short_scan) { 2960 if (!priv->is_internal_short_scan) {
2984 scan->tx_cmd.len = cpu_to_le16( 2961 scan->tx_cmd.len = cpu_to_le16(
2985 iwl_fill_probe_req(priv, 2962 iwl_fill_probe_req(priv,
@@ -3012,7 +2989,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
3012 2989
3013 if (scan->channel_count == 0) { 2990 if (scan->channel_count == 0) {
3014 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 2991 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
3015 goto done; 2992 return -EIO;
3016 } 2993 }
3017 2994
3018 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 2995 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
@@ -3021,25 +2998,22 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
3021 scan->len = cpu_to_le16(cmd.len); 2998 scan->len = cpu_to_le16(cmd.len);
3022 2999
3023 set_bit(STATUS_SCAN_HW, &priv->status); 3000 set_bit(STATUS_SCAN_HW, &priv->status);
3024 if (iwl_send_cmd_sync(priv, &cmd)) 3001 ret = iwl_send_cmd_sync(priv, &cmd);
3025 goto done; 3002 if (ret)
3026 3003 clear_bit(STATUS_SCAN_HW, &priv->status);
3027 queue_delayed_work(priv->workqueue, &priv->scan_check, 3004 return ret;
3028 IWL_SCAN_CHECK_WATCHDOG); 3005}
3029 3006
3030 return; 3007void iwl3945_post_scan(struct iwl_priv *priv)
3008{
3009 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3031 3010
3032 done: 3011 /*
3033 /* can not perform scan make sure we clear scanning 3012 * Since setting the RXON may have been deferred while
3034 * bits from status so next scan request can be performed. 3013 * performing the scan, fire one off if needed
3035 * if we dont clear scanning status bit here all next scan 3014 */
3036 * will fail 3015 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
3037 */ 3016 iwl3945_commit_rxon(priv, ctx);
3038 clear_bit(STATUS_SCAN_HW, &priv->status);
3039 clear_bit(STATUS_SCANNING, &priv->status);
3040
3041 /* inform mac80211 scan aborted */
3042 queue_work(priv->workqueue, &priv->scan_completed);
3043} 3017}
3044 3018
3045static void iwl3945_bg_restart(struct work_struct *data) 3019static void iwl3945_bg_restart(struct work_struct *data)
@@ -3108,7 +3082,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3108 conf = ieee80211_get_hw_conf(priv->hw); 3082 conf = ieee80211_get_hw_conf(priv->hw);
3109 3083
3110 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3084 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3111 iwlcore_commit_rxon(priv, ctx); 3085 iwl3945_commit_rxon(priv, ctx);
3112 3086
3113 rc = iwl_send_rxon_timing(priv, ctx); 3087 rc = iwl_send_rxon_timing(priv, ctx);
3114 if (rc) 3088 if (rc)
@@ -3134,7 +3108,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3134 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3108 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3135 } 3109 }
3136 3110
3137 iwlcore_commit_rxon(priv, ctx); 3111 iwl3945_commit_rxon(priv, ctx);
3138 3112
3139 switch (vif->type) { 3113 switch (vif->type) {
3140 case NL80211_IFTYPE_STATION: 3114 case NL80211_IFTYPE_STATION:
@@ -3233,15 +3207,6 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3233 3207
3234 priv->is_open = 0; 3208 priv->is_open = 0;
3235 3209
3236 if (iwl_is_ready_rf(priv)) {
3237 /* stop mac, cancel any scan request and clear
3238 * RXON_FILTER_ASSOC_MSK BIT
3239 */
3240 mutex_lock(&priv->mutex);
3241 iwl_scan_cancel_timeout(priv, 100);
3242 mutex_unlock(&priv->mutex);
3243 }
3244
3245 iwl3945_down(priv); 3210 iwl3945_down(priv);
3246 3211
3247 flush_workqueue(priv->workqueue); 3212 flush_workqueue(priv->workqueue);
@@ -3282,7 +3247,7 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3282 3247
3283 /* RXON - unassoc (to set timing command) */ 3248 /* RXON - unassoc (to set timing command) */
3284 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3249 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3285 iwlcore_commit_rxon(priv, ctx); 3250 iwl3945_commit_rxon(priv, ctx);
3286 3251
3287 /* RXON Timing */ 3252 /* RXON Timing */
3288 rc = iwl_send_rxon_timing(priv, ctx); 3253 rc = iwl_send_rxon_timing(priv, ctx);
@@ -3309,7 +3274,7 @@ void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3309 } 3274 }
3310 /* restore RXON assoc */ 3275 /* restore RXON assoc */
3311 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 3276 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3312 iwlcore_commit_rxon(priv, ctx); 3277 iwl3945_commit_rxon(priv, ctx);
3313 } 3278 }
3314 iwl3945_send_beacon_cmd(priv); 3279 iwl3945_send_beacon_cmd(priv);
3315 3280
@@ -3575,7 +3540,7 @@ static ssize_t store_flags(struct device *d,
3575 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", 3540 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3576 flags); 3541 flags);
3577 ctx->staging.flags = cpu_to_le32(flags); 3542 ctx->staging.flags = cpu_to_le32(flags);
3578 iwlcore_commit_rxon(priv, ctx); 3543 iwl3945_commit_rxon(priv, ctx);
3579 } 3544 }
3580 } 3545 }
3581 mutex_unlock(&priv->mutex); 3546 mutex_unlock(&priv->mutex);
@@ -3613,7 +3578,7 @@ static ssize_t store_filter_flags(struct device *d,
3613 "0x%04X\n", filter_flags); 3578 "0x%04X\n", filter_flags);
3614 ctx->staging.filter_flags = 3579 ctx->staging.filter_flags =
3615 cpu_to_le32(filter_flags); 3580 cpu_to_le32(filter_flags);
3616 iwlcore_commit_rxon(priv, ctx); 3581 iwl3945_commit_rxon(priv, ctx);
3617 } 3582 }
3618 } 3583 }
3619 mutex_unlock(&priv->mutex); 3584 mutex_unlock(&priv->mutex);
@@ -3831,10 +3796,10 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3831 iwl3945_hw_cancel_deferred_work(priv); 3796 iwl3945_hw_cancel_deferred_work(priv);
3832 3797
3833 cancel_delayed_work_sync(&priv->init_alive_start); 3798 cancel_delayed_work_sync(&priv->init_alive_start);
3834 cancel_delayed_work(&priv->scan_check);
3835 cancel_delayed_work(&priv->alive_start); 3799 cancel_delayed_work(&priv->alive_start);
3836 cancel_work_sync(&priv->start_internal_scan);
3837 cancel_work_sync(&priv->beacon_update); 3800 cancel_work_sync(&priv->beacon_update);
3801
3802 iwl_cancel_scan_deferred_work(priv);
3838} 3803}
3839 3804
3840static struct attribute *iwl3945_sysfs_entries[] = { 3805static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3883,7 +3848,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3883 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; 3848 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
3884 3849
3885 priv->retry_rate = 1; 3850 priv->retry_rate = 1;
3886 priv->ibss_beacon = NULL; 3851 priv->beacon_skb = NULL;
3887 3852
3888 spin_lock_init(&priv->sta_lock); 3853 spin_lock_init(&priv->sta_lock);
3889 spin_lock_init(&priv->hcmd_lock); 3854 spin_lock_init(&priv->hcmd_lock);
@@ -3950,7 +3915,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3950 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3915 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3951 IEEE80211_HW_SPECTRUM_MGMT; 3916 IEEE80211_HW_SPECTRUM_MGMT;
3952 3917
3953 if (!priv->cfg->broken_powersave) 3918 if (!priv->cfg->base_params->broken_powersave)
3954 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 3919 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
3955 IEEE80211_HW_SUPPORTS_DYNAMIC_PS; 3920 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
3956 3921
@@ -4035,7 +4000,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4035 * "the hard way", rather than using device's scan. 4000 * "the hard way", rather than using device's scan.
4036 */ 4001 */
4037 if (iwl3945_mod_params.disable_hw_scan) { 4002 if (iwl3945_mod_params.disable_hw_scan) {
4038 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n"); 4003 IWL_ERR(priv, "sw scan support is deprecated\n");
4039 iwl3945_hw_ops.hw_scan = NULL; 4004 iwl3945_hw_ops.hw_scan = NULL;
4040 } 4005 }
4041 4006
@@ -4247,7 +4212,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4247 * paths to avoid running iwl_down() at all before leaving driver. 4212 * paths to avoid running iwl_down() at all before leaving driver.
4248 * This (inexpensive) call *makes sure* device is reset. 4213 * This (inexpensive) call *makes sure* device is reset.
4249 */ 4214 */
4250 priv->cfg->ops->lib->apm_ops.stop(priv); 4215 iwl_apm_stop(priv);
4251 4216
4252 /* make sure we flush any pending irq or 4217 /* make sure we flush any pending irq or
4253 * tasklet for the driver 4218 * tasklet for the driver
@@ -4291,8 +4256,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4291 iwl_free_channel_map(priv); 4256 iwl_free_channel_map(priv);
4292 iwlcore_free_geos(priv); 4257 iwlcore_free_geos(priv);
4293 kfree(priv->scan_cmd); 4258 kfree(priv->scan_cmd);
4294 if (priv->ibss_beacon) 4259 if (priv->beacon_skb)
4295 dev_kfree_skb(priv->ibss_beacon); 4260 dev_kfree_skb(priv->beacon_skb);
4296 4261
4297 ieee80211_free_hw(priv->hw); 4262 ieee80211_free_hw(priv->hw);
4298} 4263}
@@ -4360,7 +4325,8 @@ MODULE_PARM_DESC(debug, "debug output mask");
4360#endif 4325#endif
4361module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, 4326module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4362 int, S_IRUGO); 4327 int, S_IRUGO);
4363MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 4328MODULE_PARM_DESC(disable_hw_scan,
4329 "disable hardware scanning (default 0) (deprecated)");
4364module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); 4330module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4365MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4331MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
4366 4332
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 60619678f4ec..c6c0eff9b5ed 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -161,7 +161,7 @@ static int iwm_key_init(struct iwm_key *key, u8 key_index,
161} 161}
162 162
163static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, 163static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
164 u8 key_index, const u8 *mac_addr, 164 u8 key_index, bool pairwise, const u8 *mac_addr,
165 struct key_params *params) 165 struct key_params *params)
166{ 166{
167 struct iwm_priv *iwm = ndev_to_iwm(ndev); 167 struct iwm_priv *iwm = ndev_to_iwm(ndev);
@@ -181,7 +181,8 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
181} 181}
182 182
183static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, 183static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
184 u8 key_index, const u8 *mac_addr, void *cookie, 184 u8 key_index, bool pairwise, const u8 *mac_addr,
185 void *cookie,
185 void (*callback)(void *cookie, 186 void (*callback)(void *cookie,
186 struct key_params*)) 187 struct key_params*))
187{ 188{
@@ -206,7 +207,7 @@ static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
206 207
207 208
208static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 209static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
209 u8 key_index, const u8 *mac_addr) 210 u8 key_index, bool pairwise, const u8 *mac_addr)
210{ 211{
211 struct iwm_priv *iwm = ndev_to_iwm(ndev); 212 struct iwm_priv *iwm = ndev_to_iwm(ndev);
212 struct iwm_key *key = &iwm->keys[key_index]; 213 struct iwm_key *key = &iwm->keys[key_index];
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 317f086ced0a..5046a0005034 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -481,7 +481,6 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
481 struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp; 481 struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
482 int bsssize; 482 int bsssize;
483 const u8 *pos; 483 const u8 *pos;
484 u16 nr_sets;
485 const u8 *tsfdesc; 484 const u8 *tsfdesc;
486 int tsfsize; 485 int tsfsize;
487 int i; 486 int i;
@@ -490,12 +489,11 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
490 lbs_deb_enter(LBS_DEB_CFG80211); 489 lbs_deb_enter(LBS_DEB_CFG80211);
491 490
492 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize); 491 bsssize = get_unaligned_le16(&scanresp->bssdescriptsize);
493 nr_sets = le16_to_cpu(scanresp->nr_sets);
494 492
495 lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n", 493 lbs_deb_scan("scan response: %d BSSs (%d bytes); resp size %d bytes\n",
496 nr_sets, bsssize, le16_to_cpu(resp->size)); 494 scanresp->nr_sets, bsssize, le16_to_cpu(resp->size));
497 495
498 if (nr_sets == 0) { 496 if (scanresp->nr_sets == 0) {
499 ret = 0; 497 ret = 0;
500 goto done; 498 goto done;
501 } 499 }
@@ -1442,7 +1440,7 @@ static int lbs_cfg_set_default_key(struct wiphy *wiphy,
1442 1440
1443 1441
1444static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev, 1442static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
1445 u8 idx, const u8 *mac_addr, 1443 u8 idx, bool pairwise, const u8 *mac_addr,
1446 struct key_params *params) 1444 struct key_params *params)
1447{ 1445{
1448 struct lbs_private *priv = wiphy_priv(wiphy); 1446 struct lbs_private *priv = wiphy_priv(wiphy);
@@ -1502,7 +1500,7 @@ static int lbs_cfg_add_key(struct wiphy *wiphy, struct net_device *netdev,
1502 1500
1503 1501
1504static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev, 1502static int lbs_cfg_del_key(struct wiphy *wiphy, struct net_device *netdev,
1505 u8 key_index, const u8 *mac_addr) 1503 u8 key_index, bool pairwise, const u8 *mac_addr)
1506{ 1504{
1507 1505
1508 lbs_deb_enter(LBS_DEB_CFG80211); 1506 lbs_deb_enter(LBS_DEB_CFG80211);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index e906616232a2..efaf85032208 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -487,11 +487,12 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
487 */ 487 */
488static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb) 488static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb)
489{ 489{
490 int ret = -1; 490 int ret;
491 491
492 /* check if device is removed */ 492 /* check if device is removed */
493 if (cardp->surprise_removed) { 493 if (cardp->surprise_removed) {
494 lbs_deb_usbd(&cardp->udev->dev, "Device removed\n"); 494 lbs_deb_usbd(&cardp->udev->dev, "Device removed\n");
495 ret = -ENODEV;
495 goto tx_ret; 496 goto tx_ret;
496 } 497 }
497 498
@@ -504,7 +505,6 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb
504 505
505 if ((ret = usb_submit_urb(cardp->tx_urb, GFP_ATOMIC))) { 506 if ((ret = usb_submit_urb(cardp->tx_urb, GFP_ATOMIC))) {
506 lbs_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret); 507 lbs_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret);
507 ret = -1;
508 } else { 508 } else {
509 lbs_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n"); 509 lbs_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
510 ret = 0; 510 ret = 0;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 194762ab0142..acf3bf63ee33 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -574,7 +574,7 @@ int lbs_mesh_bt_set_inverted(struct lbs_private *priv, bool inverted)
574 memset(&cmd, 0, sizeof(cmd)); 574 memset(&cmd, 0, sizeof(cmd));
575 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 575 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
576 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT); 576 cmd.action = cpu_to_le16(CMD_ACT_BT_ACCESS_SET_INVERT);
577 cmd.id = !!inverted; 577 cmd.id = cpu_to_le32(!!inverted);
578 578
579 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd); 579 ret = lbs_cmd_with_response(priv, CMD_BT_ACCESS, &cmd);
580 580
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 92b486d46eb9..7eaaa3bab547 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -595,7 +595,8 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
595 struct ieee80211_vif *vif) 595 struct ieee80211_vif *vif)
596{ 596{
597 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", 597 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
598 __func__, vif->type, vif->addr); 598 __func__, ieee80211_vif_type_p2p(vif),
599 vif->addr);
599 hwsim_set_magic(vif); 600 hwsim_set_magic(vif);
600 return 0; 601 return 0;
601} 602}
@@ -603,11 +604,14 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
603 604
604static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw, 605static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
605 struct ieee80211_vif *vif, 606 struct ieee80211_vif *vif,
606 enum nl80211_iftype newtype) 607 enum nl80211_iftype newtype,
608 bool newp2p)
607{ 609{
610 newtype = ieee80211_iftype_p2p(newtype, newp2p);
608 wiphy_debug(hw->wiphy, 611 wiphy_debug(hw->wiphy,
609 "%s (old type=%d, new type=%d, mac_addr=%pM)\n", 612 "%s (old type=%d, new type=%d, mac_addr=%pM)\n",
610 __func__, vif->type, newtype, vif->addr); 613 __func__, ieee80211_vif_type_p2p(vif),
614 newtype, vif->addr);
611 hwsim_check_magic(vif); 615 hwsim_check_magic(vif);
612 616
613 return 0; 617 return 0;
@@ -617,7 +621,8 @@ static void mac80211_hwsim_remove_interface(
617 struct ieee80211_hw *hw, struct ieee80211_vif *vif) 621 struct ieee80211_hw *hw, struct ieee80211_vif *vif)
618{ 622{
619 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", 623 wiphy_debug(hw->wiphy, "%s (type=%d mac_addr=%pM)\n",
620 __func__, vif->type, vif->addr); 624 __func__, ieee80211_vif_type_p2p(vif),
625 vif->addr);
621 hwsim_check_magic(vif); 626 hwsim_check_magic(vif);
622 hwsim_clear_magic(vif); 627 hwsim_clear_magic(vif);
623} 628}
@@ -1310,6 +1315,8 @@ static int __init init_mac80211_hwsim(void)
1310 hw->wiphy->interface_modes = 1315 hw->wiphy->interface_modes =
1311 BIT(NL80211_IFTYPE_STATION) | 1316 BIT(NL80211_IFTYPE_STATION) |
1312 BIT(NL80211_IFTYPE_AP) | 1317 BIT(NL80211_IFTYPE_AP) |
1318 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1319 BIT(NL80211_IFTYPE_P2P_GO) |
1313 BIT(NL80211_IFTYPE_ADHOC) | 1320 BIT(NL80211_IFTYPE_ADHOC) |
1314 BIT(NL80211_IFTYPE_MESH_POINT); 1321 BIT(NL80211_IFTYPE_MESH_POINT);
1315 1322
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 8c05266d37f4..35b09aa0529b 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -261,8 +261,10 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
261 list->max_entries = max_channel_num; 261 list->max_entries = max_channel_num;
262 list->channels = kzalloc(sizeof(struct p54_channel_entry) * 262 list->channels = kzalloc(sizeof(struct p54_channel_entry) *
263 max_channel_num, GFP_KERNEL); 263 max_channel_num, GFP_KERNEL);
264 if (!list->channels) 264 if (!list->channels) {
265 ret = -ENOMEM;
265 goto free; 266 goto free;
267 }
266 268
267 for (i = 0; i < max_channel_num; i++) { 269 for (i = 0; i < max_channel_num; i++) {
268 if (i < priv->iq_autocal_len) { 270 if (i < priv->iq_autocal_len) {
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 156e57dbd2cf..18d24b7b1e34 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -202,6 +202,8 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
202 dev_info(&priv->spi->dev, "loading default eeprom...\n"); 202 dev_info(&priv->spi->dev, "loading default eeprom...\n");
203 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom, 203 ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom,
204 sizeof(p54spi_eeprom)); 204 sizeof(p54spi_eeprom));
205#else
206 dev_err(&priv->spi->dev, "Failed to request user eeprom\n");
205#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */ 207#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
206 } else { 208 } else {
207 dev_info(&priv->spi->dev, "loading user eeprom...\n"); 209 dev_info(&priv->spi->dev, "loading user eeprom...\n");
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 063248b35069..d5bc21e5a02c 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -33,8 +33,17 @@ MODULE_ALIAS("prism54usb");
33MODULE_FIRMWARE("isl3886usb"); 33MODULE_FIRMWARE("isl3886usb");
34MODULE_FIRMWARE("isl3887usb"); 34MODULE_FIRMWARE("isl3887usb");
35 35
36/*
37 * Note:
38 *
39 * Always update our wiki's device list (located at:
40 * http://wireless.kernel.org/en/users/Drivers/p54/devices ),
41 * whenever you add a new device.
42 */
43
36static struct usb_device_id p54u_table[] __devinitdata = { 44static struct usb_device_id p54u_table[] __devinitdata = {
37 /* Version 1 devices (pci chip + net2280) */ 45 /* Version 1 devices (pci chip + net2280) */
46 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
38 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ 47 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
39 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ 48 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
40 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ 49 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
@@ -47,7 +56,9 @@ static struct usb_device_id p54u_table[] __devinitdata = {
47 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ 56 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
48 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ 57 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
49 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ 58 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
59 {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
50 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ 60 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
61 {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
51 {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ 62 {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */
52 {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ 63 {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
53 {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ 64 {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
@@ -60,6 +71,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
60 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ 71 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
61 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ 72 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
62 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ 73 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
74 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
63 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ 75 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
64 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 76 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
65 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 77 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
@@ -80,6 +92,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
80 {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ 92 {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
81 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 93 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
82 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 94 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
95 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
83 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 96 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
84 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ 97 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
85 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ 98 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index d91a831a7700..5ca624a64c42 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -194,7 +194,7 @@ module_param(bc, int, 0);
194module_param(phy_addr, charp, 0); 194module_param(phy_addr, charp, 0);
195module_param(ray_mem_speed, int, 0); 195module_param(ray_mem_speed, int, 0);
196 196
197static UCHAR b5_default_startup_parms[] = { 197static const UCHAR b5_default_startup_parms[] = {
198 0, 0, /* Adhoc station */ 198 0, 0, /* Adhoc station */
199 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */ 199 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
200 0, 0, 0, 0, 0, 0, 0, 0, 200 0, 0, 0, 0, 0, 0, 0, 0,
@@ -229,7 +229,7 @@ static UCHAR b5_default_startup_parms[] = {
229 2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */ 229 2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */
230}; 230};
231 231
232static UCHAR b4_default_startup_parms[] = { 232static const UCHAR b4_default_startup_parms[] = {
233 0, 0, /* Adhoc station */ 233 0, 0, /* Adhoc station */
234 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */ 234 'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
235 0, 0, 0, 0, 0, 0, 0, 0, 235 0, 0, 0, 0, 0, 0, 0, 0,
@@ -261,9 +261,9 @@ static UCHAR b4_default_startup_parms[] = {
261}; 261};
262 262
263/*===========================================================================*/ 263/*===========================================================================*/
264static unsigned char eth2_llc[] = { 0xaa, 0xaa, 3, 0, 0, 0 }; 264static const u8 eth2_llc[] = { 0xaa, 0xaa, 3, 0, 0, 0 };
265 265
266static char hop_pattern_length[] = { 1, 266static const char hop_pattern_length[] = { 1,
267 USA_HOP_MOD, EUROPE_HOP_MOD, 267 USA_HOP_MOD, EUROPE_HOP_MOD,
268 JAPAN_HOP_MOD, KOREA_HOP_MOD, 268 JAPAN_HOP_MOD, KOREA_HOP_MOD,
269 SPAIN_HOP_MOD, FRANCE_HOP_MOD, 269 SPAIN_HOP_MOD, FRANCE_HOP_MOD,
@@ -271,7 +271,7 @@ static char hop_pattern_length[] = { 1,
271 JAPAN_TEST_HOP_MOD 271 JAPAN_TEST_HOP_MOD
272}; 272};
273 273
274static char rcsid[] = 274static const char rcsid[] =
275 "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.std.com>"; 275 "Raylink/WebGear wireless LAN - Corey <Thomas corey@world.std.com>";
276 276
277static const struct net_device_ops ray_netdev_ops = { 277static const struct net_device_ops ray_netdev_ops = {
@@ -2575,7 +2575,7 @@ static void clear_interrupt(ray_dev_t *local)
2575#ifdef CONFIG_PROC_FS 2575#ifdef CONFIG_PROC_FS
2576#define MAXDATA (PAGE_SIZE - 80) 2576#define MAXDATA (PAGE_SIZE - 80)
2577 2577
2578static char *card_status[] = { 2578static const char *card_status[] = {
2579 "Card inserted - uninitialized", /* 0 */ 2579 "Card inserted - uninitialized", /* 0 */
2580 "Card not downloaded", /* 1 */ 2580 "Card not downloaded", /* 1 */
2581 "Waiting for download parameters", /* 2 */ 2581 "Waiting for download parameters", /* 2 */
@@ -2592,8 +2592,8 @@ static char *card_status[] = {
2592 "Association failed" /* 16 */ 2592 "Association failed" /* 16 */
2593}; 2593};
2594 2594
2595static char *nettype[] = { "Adhoc", "Infra " }; 2595static const char *nettype[] = { "Adhoc", "Infra " };
2596static char *framing[] = { "Encapsulation", "Translation" } 2596static const char *framing[] = { "Encapsulation", "Translation" }
2597 2597
2598; 2598;
2599/*===========================================================================*/ 2599/*===========================================================================*/
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 719573bbbf81..71b5971da597 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -540,11 +540,11 @@ static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
540 struct ieee80211_channel *chan, enum nl80211_channel_type channel_type); 540 struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
541 541
542static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, 542static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
543 u8 key_index, const u8 *mac_addr, 543 u8 key_index, bool pairwise, const u8 *mac_addr,
544 struct key_params *params); 544 struct key_params *params);
545 545
546static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, 546static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
547 u8 key_index, const u8 *mac_addr); 547 u8 key_index, bool pairwise, const u8 *mac_addr);
548 548
549static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, 549static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
550 u8 key_index); 550 u8 key_index);
@@ -2308,8 +2308,8 @@ static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
2308} 2308}
2309 2309
2310static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, 2310static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
2311 u8 key_index, const u8 *mac_addr, 2311 u8 key_index, bool pairwise, const u8 *mac_addr,
2312 struct key_params *params) 2312 struct key_params *params)
2313{ 2313{
2314 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2314 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2315 struct usbnet *usbdev = priv->usbdev; 2315 struct usbnet *usbdev = priv->usbdev;
@@ -2344,7 +2344,7 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
2344} 2344}
2345 2345
2346static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev, 2346static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
2347 u8 key_index, const u8 *mac_addr) 2347 u8 key_index, bool pairwise, const u8 *mac_addr)
2348{ 2348{
2349 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2349 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2350 struct usbnet *usbdev = priv->usbdev; 2350 struct usbnet *usbdev = priv->usbdev;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 103c71164f10..4f420a9ec5dc 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -321,7 +321,8 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
321} 321}
322 322
323static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev, 323static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
324 struct rt2x00lib_erp *erp) 324 struct rt2x00lib_erp *erp,
325 u32 changed)
325{ 326{
326 int preamble_mask; 327 int preamble_mask;
327 u32 reg; 328 u32 reg;
@@ -329,59 +330,72 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
329 /* 330 /*
330 * When short preamble is enabled, we should set bit 0x08 331 * When short preamble is enabled, we should set bit 0x08
331 */ 332 */
332 preamble_mask = erp->short_preamble << 3; 333 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
333 334 preamble_mask = erp->short_preamble << 3;
334 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 335
335 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff); 336 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
336 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a); 337 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff);
337 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 338 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a);
338 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 339 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
339 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 340 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
340 341 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
341 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 342
342 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 343 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
343 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 344 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
344 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); 345 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
345 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 346 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
346 347 GET_DURATION(ACK_SIZE, 10));
347 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 348 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
348 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 349
349 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 350 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg);
350 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); 351 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
351 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 352 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
352 353 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
353 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 354 GET_DURATION(ACK_SIZE, 20));
354 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 355 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg);
355 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 356
356 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); 357 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg);
357 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 358 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
358 359 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
359 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 360 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
360 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 361 GET_DURATION(ACK_SIZE, 55));
361 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 362 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg);
362 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); 363
363 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 364 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg);
364 365 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
365 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); 366 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
367 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
368 GET_DURATION(ACK_SIZE, 110));
369 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg);
370 }
366 371
367 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 372 if (changed & BSS_CHANGED_BASIC_RATES)
368 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); 373 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
369 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
370 374
371 rt2x00pci_register_read(rt2x00dev, CSR12, &reg); 375 if (changed & BSS_CHANGED_ERP_SLOT) {
372 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); 376 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
373 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); 377 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
374 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 378 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
375 379
376 rt2x00pci_register_read(rt2x00dev, CSR18, &reg); 380 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
377 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); 381 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
378 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); 382 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
379 rt2x00pci_register_write(rt2x00dev, CSR18, reg); 383 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
380 384
381 rt2x00pci_register_read(rt2x00dev, CSR19, &reg); 385 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
382 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs); 386 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
383 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs); 387 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
384 rt2x00pci_register_write(rt2x00dev, CSR19, reg); 388 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
389 }
390
391 if (changed & BSS_CHANGED_BEACON_INT) {
392 rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
393 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
394 erp->beacon_int * 16);
395 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
396 erp->beacon_int * 16);
397 rt2x00pci_register_write(rt2x00dev, CSR12, reg);
398 }
385} 399}
386 400
387static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev, 401static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev,
@@ -1090,7 +1104,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1090 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 1104 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1091 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1105 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1092 1106
1093 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 1107 rt2x00queue_map_txskb(entry);
1094 1108
1095 /* 1109 /*
1096 * Write the TX descriptor for the beacon. 1110 * Write the TX descriptor for the beacon.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index ab0507110e42..97feb7aef809 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -327,7 +327,8 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
327} 327}
328 328
329static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev, 329static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
330 struct rt2x00lib_erp *erp) 330 struct rt2x00lib_erp *erp,
331 u32 changed)
331{ 332{
332 int preamble_mask; 333 int preamble_mask;
333 u32 reg; 334 u32 reg;
@@ -335,59 +336,73 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
335 /* 336 /*
336 * When short preamble is enabled, we should set bit 0x08 337 * When short preamble is enabled, we should set bit 0x08
337 */ 338 */
338 preamble_mask = erp->short_preamble << 3; 339 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
339 340 preamble_mask = erp->short_preamble << 3;
340 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 341
341 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162); 342 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
342 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2); 343 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162);
343 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 344 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2);
344 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 345 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
345 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 346 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
346 347 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
347 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 348
348 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 349 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
349 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 350 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
350 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); 351 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
351 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 352 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
352 353 GET_DURATION(ACK_SIZE, 10));
353 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 354 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
354 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 355
355 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 356 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg);
356 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); 357 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
357 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 358 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
358 359 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
359 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 360 GET_DURATION(ACK_SIZE, 20));
360 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 361 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg);
361 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 362
362 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); 363 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg);
363 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 364 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
364 365 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
365 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 366 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
366 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 367 GET_DURATION(ACK_SIZE, 55));
367 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 368 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg);
368 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); 369
369 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 370 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg);
370 371 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
371 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates); 372 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
373 rt2x00_set_field32(&reg, ARCSR2_LENGTH,
374 GET_DURATION(ACK_SIZE, 110));
375 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg);
376 }
372 377
373 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 378 if (changed & BSS_CHANGED_BASIC_RATES)
374 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); 379 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
375 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 380
381 if (changed & BSS_CHANGED_ERP_SLOT) {
382 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
383 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
384 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
376 385
377 rt2x00pci_register_read(rt2x00dev, CSR12, &reg); 386 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
378 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); 387 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
379 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); 388 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
380 rt2x00pci_register_write(rt2x00dev, CSR12, reg); 389 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
381 390
382 rt2x00pci_register_read(rt2x00dev, CSR18, &reg); 391 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
383 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); 392 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
384 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); 393 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
385 rt2x00pci_register_write(rt2x00dev, CSR18, reg); 394 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
395 }
396
397 if (changed & BSS_CHANGED_BEACON_INT) {
398 rt2x00pci_register_read(rt2x00dev, CSR12, &reg);
399 rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL,
400 erp->beacon_int * 16);
401 rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION,
402 erp->beacon_int * 16);
403 rt2x00pci_register_write(rt2x00dev, CSR12, reg);
404 }
386 405
387 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
388 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
389 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
390 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
391} 406}
392 407
393static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev, 408static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
@@ -1243,7 +1258,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1243 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 1258 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1244 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1259 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1245 1260
1246 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 1261 rt2x00queue_map_txskb(entry);
1247 1262
1248 /* 1263 /*
1249 * Write the TX descriptor for the beacon. 1264 * Write the TX descriptor for the beacon.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index db64df4267d8..93e44c7f3a74 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -494,24 +494,34 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
494} 494}
495 495
496static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev, 496static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
497 struct rt2x00lib_erp *erp) 497 struct rt2x00lib_erp *erp,
498 u32 changed)
498{ 499{
499 u16 reg; 500 u16 reg;
500 501
501 rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg); 502 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
502 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, 503 rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg);
503 !!erp->short_preamble); 504 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
504 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); 505 !!erp->short_preamble);
506 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
507 }
505 508
506 rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates); 509 if (changed & BSS_CHANGED_BASIC_RATES)
510 rt2500usb_register_write(rt2x00dev, TXRX_CSR11,
511 erp->basic_rates);
507 512
508 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); 513 if (changed & BSS_CHANGED_BEACON_INT) {
509 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4); 514 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
510 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); 515 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
516 erp->beacon_int * 4);
517 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
518 }
511 519
512 rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time); 520 if (changed & BSS_CHANGED_ERP_SLOT) {
513 rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs); 521 rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time);
514 rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs); 522 rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs);
523 rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs);
524 }
515} 525}
516 526
517static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev, 527static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
@@ -1664,10 +1674,15 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1664 1674
1665 /* 1675 /*
1666 * Initialize all hw fields. 1676 * Initialize all hw fields.
1677 *
1678 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
1679 * capable of sending the buffered frames out after the DTIM
1680 * transmission using rt2x00lib_beacondone. This will send out
1681 * multicast and broadcast traffic immediately instead of buffering it
1682 * infinitly and thus dropping it after some time.
1667 */ 1683 */
1668 rt2x00dev->hw->flags = 1684 rt2x00dev->hw->flags =
1669 IEEE80211_HW_RX_INCLUDES_FCS | 1685 IEEE80211_HW_RX_INCLUDES_FCS |
1670 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1671 IEEE80211_HW_SIGNAL_DBM | 1686 IEEE80211_HW_SIGNAL_DBM |
1672 IEEE80211_HW_SUPPORTS_PS | 1687 IEEE80211_HW_SUPPORTS_PS |
1673 IEEE80211_HW_PS_NULLFUNC_STACK; 1688 IEEE80211_HW_PS_NULLFUNC_STACK;
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 70a5cb86405b..eb8b6cab9925 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> 2 Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com> 4 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org> 5 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com> 6 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
@@ -639,6 +640,18 @@
639#define LED_CFG_LED_POLAR FIELD32(0x40000000) 640#define LED_CFG_LED_POLAR FIELD32(0x40000000)
640 641
641/* 642/*
643 * AMPDU_BA_WINSIZE: Force BlockAck window size
644 * FORCE_WINSIZE_ENABLE:
645 * 0: Disable forcing of BlockAck window size
646 * 1: Enable forcing of BlockAck window size, overwrites values BlockAck
647 * window size values in the TXWI
648 * FORCE_WINSIZE: BlockAck window size
649 */
650#define AMPDU_BA_WINSIZE 0x1040
651#define AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE FIELD32(0x00000020)
652#define AMPDU_BA_WINSIZE_FORCE_WINSIZE FIELD32(0x0000001f)
653
654/*
642 * XIFS_TIME_CFG: MAC timing 655 * XIFS_TIME_CFG: MAC timing
643 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX 656 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
644 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX 657 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
@@ -698,8 +711,14 @@
698 711
699/* 712/*
700 * TBTT_SYNC_CFG: 713 * TBTT_SYNC_CFG:
714 * BCN_AIFSN: Beacon AIFSN after TBTT interrupt in slots
715 * BCN_CWMIN: Beacon CWMin after TBTT interrupt in slots
701 */ 716 */
702#define TBTT_SYNC_CFG 0x1118 717#define TBTT_SYNC_CFG 0x1118
718#define TBTT_SYNC_CFG_TBTT_ADJUST FIELD32(0x000000ff)
719#define TBTT_SYNC_CFG_BCN_EXP_WIN FIELD32(0x0000ff00)
720#define TBTT_SYNC_CFG_BCN_AIFSN FIELD32(0x000f0000)
721#define TBTT_SYNC_CFG_BCN_CWMIN FIELD32(0x00f00000)
703 722
704/* 723/*
705 * TSF_TIMER_DW0: Local lsb TSF timer, read-only 724 * TSF_TIMER_DW0: Local lsb TSF timer, read-only
@@ -735,16 +754,21 @@
735#define INT_TIMER_EN_GP_TIMER FIELD32(0x00000002) 754#define INT_TIMER_EN_GP_TIMER FIELD32(0x00000002)
736 755
737/* 756/*
738 * CH_IDLE_STA: channel idle time 757 * CH_IDLE_STA: channel idle time (in us)
739 */ 758 */
740#define CH_IDLE_STA 0x1130 759#define CH_IDLE_STA 0x1130
741 760
742/* 761/*
743 * CH_BUSY_STA: channel busy time 762 * CH_BUSY_STA: channel busy time on primary channel (in us)
744 */ 763 */
745#define CH_BUSY_STA 0x1134 764#define CH_BUSY_STA 0x1134
746 765
747/* 766/*
767 * CH_BUSY_STA_SEC: channel busy time on secondary channel in HT40 mode (in us)
768 */
769#define CH_BUSY_STA_SEC 0x1138
770
771/*
748 * MAC_STATUS_CFG: 772 * MAC_STATUS_CFG:
749 * BBP_RF_BUSY: When set to 0, BBP and RF are stable. 773 * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
750 * if 1 or higher one of the 2 registers is busy. 774 * if 1 or higher one of the 2 registers is busy.
@@ -1330,6 +1354,9 @@
1330 * PID_TYPE: The PID latched from the PID field in the TXWI, can be used 1354 * PID_TYPE: The PID latched from the PID field in the TXWI, can be used
1331 * to match a frame with its tx result (even though the PID is 1355 * to match a frame with its tx result (even though the PID is
1332 * only 4 bits wide). 1356 * only 4 bits wide).
1357 * PID_QUEUE: Part of PID_TYPE, this is the queue index number (0-3)
1358 * PID_ENTRY: Part of PID_TYPE, this is the queue entry index number (1-3)
1359 * This identification number is calculated by ((idx % 3) + 1).
1333 * TX_SUCCESS: Indicates tx success (1) or failure (0) 1360 * TX_SUCCESS: Indicates tx success (1) or failure (0)
1334 * TX_AGGRE: Indicates if the frame was part of an aggregate (1) or not (0) 1361 * TX_AGGRE: Indicates if the frame was part of an aggregate (1) or not (0)
1335 * TX_ACK_REQUIRED: Indicates if the frame needed to get ack'ed (1) or not (0) 1362 * TX_ACK_REQUIRED: Indicates if the frame needed to get ack'ed (1) or not (0)
@@ -1341,6 +1368,8 @@
1341#define TX_STA_FIFO 0x1718 1368#define TX_STA_FIFO 0x1718
1342#define TX_STA_FIFO_VALID FIELD32(0x00000001) 1369#define TX_STA_FIFO_VALID FIELD32(0x00000001)
1343#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e) 1370#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
1371#define TX_STA_FIFO_PID_QUEUE FIELD32(0x00000006)
1372#define TX_STA_FIFO_PID_ENTRY FIELD32(0x00000018)
1344#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020) 1373#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
1345#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040) 1374#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
1346#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080) 1375#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
@@ -1423,6 +1452,24 @@
1423 1452
1424/* 1453/*
1425 * Security key table memory. 1454 * Security key table memory.
1455 *
1456 * The pairwise key table shares some memory with the beacon frame
1457 * buffers 6 and 7. That basically means that when beacon 6 & 7
1458 * are used we should only use the reduced pairwise key table which
1459 * has a maximum of 222 entries.
1460 *
1461 * ---------------------------------------------
1462 * |0x4000 | Pairwise Key | Reduced Pairwise |
1463 * | | Table | Key Table |
1464 * | | Size: 256 * 32 | Size: 222 * 32 |
1465 * |0x5BC0 | |-------------------
1466 * | | | Beacon 6 |
1467 * |0x5DC0 | |-------------------
1468 * | | | Beacon 7 |
1469 * |0x5FC0 | |-------------------
1470 * |0x5FFF | |
1471 * --------------------------
1472 *
1426 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry 1473 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
1427 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry 1474 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
1428 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry 1475 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
@@ -1572,7 +1619,8 @@ struct mac_iveiv_entry {
1572 * 2. Extract memory from FCE table for BCN 4~5 1619 * 2. Extract memory from FCE table for BCN 4~5
1573 * 3. Extract memory from Pair-wise key table for BCN 6~7 1620 * 3. Extract memory from Pair-wise key table for BCN 6~7
1574 * It occupied those memory of wcid 238~253 for BCN 6 1621 * It occupied those memory of wcid 238~253 for BCN 6
1575 * and wcid 222~237 for BCN 7 1622 * and wcid 222~237 for BCN 7 (see Security key table memory
1623 * for more info).
1576 * 1624 *
1577 * IMPORTANT NOTE: Not sure why legacy driver does this, 1625 * IMPORTANT NOTE: Not sure why legacy driver does this,
1578 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6. 1626 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
@@ -1951,10 +1999,17 @@ struct mac_iveiv_entry {
1951 * FRAG: 1 To inform TKIP engine this is a fragment. 1999 * FRAG: 1 To inform TKIP engine this is a fragment.
1952 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode 2000 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
1953 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs 2001 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
1954 * BW: Channel bandwidth 20MHz or 40 MHz 2002 * BW: Channel bandwidth 0:20MHz, 1:40 MHz (for legacy rates this will
2003 * duplicate the frame to both channels).
1955 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED 2004 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
1956 * AMPDU: 1: this frame is eligible for AMPDU aggregation, the hw will 2005 * AMPDU: 1: this frame is eligible for AMPDU aggregation, the hw will
1957 * aggregate consecutive frames with the same RA and QoS TID. 2006 * aggregate consecutive frames with the same RA and QoS TID. If
2007 * a frame A with the same RA and QoS TID but AMPDU=0 is queued
2008 * directly after a frame B with AMPDU=1, frame A might still
2009 * get aggregated into the AMPDU started by frame B. So, setting
2010 * AMPDU to 0 does _not_ necessarily mean the frame is sent as
2011 * MPDU, it can still end up in an AMPDU if the previous frame
2012 * was tagged as AMPDU.
1958 */ 2013 */
1959#define TXWI_W0_FRAG FIELD32(0x00000001) 2014#define TXWI_W0_FRAG FIELD32(0x00000001)
1960#define TXWI_W0_MIMO_PS FIELD32(0x00000002) 2015#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
@@ -1981,6 +2036,10 @@ struct mac_iveiv_entry {
1981 * frame was processed. If multiple frames are aggregated together 2036 * frame was processed. If multiple frames are aggregated together
1982 * (AMPDU==1) the reported tx status will always contain the packet 2037 * (AMPDU==1) the reported tx status will always contain the packet
1983 * id of the first frame. 0: Don't report tx status for this frame. 2038 * id of the first frame. 0: Don't report tx status for this frame.
2039 * PACKETID_QUEUE: Part of PACKETID, This is the queue index (0-3)
2040 * PACKETID_ENTRY: Part of PACKETID, THis is the queue entry index (1-3)
2041 * This identification number is calculated by ((idx % 3) + 1).
2042 * The (+1) is required to prevent PACKETID to become 0.
1984 */ 2043 */
1985#define TXWI_W1_ACK FIELD32(0x00000001) 2044#define TXWI_W1_ACK FIELD32(0x00000001)
1986#define TXWI_W1_NSEQ FIELD32(0x00000002) 2045#define TXWI_W1_NSEQ FIELD32(0x00000002)
@@ -1988,6 +2047,8 @@ struct mac_iveiv_entry {
1988#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00) 2047#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
1989#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000) 2048#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1990#define TXWI_W1_PACKETID FIELD32(0xf0000000) 2049#define TXWI_W1_PACKETID FIELD32(0xf0000000)
2050#define TXWI_W1_PACKETID_QUEUE FIELD32(0x30000000)
2051#define TXWI_W1_PACKETID_ENTRY FIELD32(0xc0000000)
1991 2052
1992/* 2053/*
1993 * Word2 2054 * Word2
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 27a6e225083c..5f00e00789d8 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -483,7 +483,8 @@ void rt2800_write_tx_data(struct queue_entry *entry,
483 txdesc->key_idx : 0xff); 483 txdesc->key_idx : 0xff);
484 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 484 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
485 txdesc->length); 485 txdesc->length);
486 rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->qid + 1); 486 rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, txdesc->qid);
487 rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1);
487 rt2x00_desc_write(txwi, 1, word); 488 rt2x00_desc_write(txwi, 1, word);
488 489
489 /* 490 /*
@@ -498,7 +499,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
498} 499}
499EXPORT_SYMBOL_GPL(rt2800_write_tx_data); 500EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
500 501
501static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxwi_w2) 502static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
502{ 503{
503 int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0); 504 int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
504 int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1); 505 int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
@@ -630,15 +631,90 @@ static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
630 return true; 631 return true;
631} 632}
632 633
634void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
635{
636 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
637 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
638 struct txdone_entry_desc txdesc;
639 u32 word;
640 u16 mcs, real_mcs;
641 int aggr, ampdu;
642 __le32 *txwi;
643
644 /*
645 * Obtain the status about this packet.
646 */
647 txdesc.flags = 0;
648 txwi = rt2800_drv_get_txwi(entry);
649 rt2x00_desc_read(txwi, 0, &word);
650
651 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
652 ampdu = rt2x00_get_field32(word, TXWI_W0_AMPDU);
653
654 real_mcs = rt2x00_get_field32(status, TX_STA_FIFO_MCS);
655 aggr = rt2x00_get_field32(status, TX_STA_FIFO_TX_AGGRE);
656
657 /*
658 * If a frame was meant to be sent as a single non-aggregated MPDU
659 * but ended up in an aggregate the used tx rate doesn't correlate
660 * with the one specified in the TXWI as the whole aggregate is sent
661 * with the same rate.
662 *
663 * For example: two frames are sent to rt2x00, the first one sets
664 * AMPDU=1 and requests MCS7 whereas the second frame sets AMDPU=0
665 * and requests MCS15. If the hw aggregates both frames into one
666 * AMDPU the tx status for both frames will contain MCS7 although
667 * the frame was sent successfully.
668 *
669 * Hence, replace the requested rate with the real tx rate to not
670 * confuse the rate control algortihm by providing clearly wrong
671 * data.
672 */
673 if (aggr == 1 && ampdu == 0 && real_mcs != mcs) {
674 skbdesc->tx_rate_idx = real_mcs;
675 mcs = real_mcs;
676 }
677
678 /*
679 * Ralink has a retry mechanism using a global fallback
680 * table. We setup this fallback table to try the immediate
681 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
682 * always contains the MCS used for the last transmission, be
683 * it successful or not.
684 */
685 if (rt2x00_get_field32(status, TX_STA_FIFO_TX_SUCCESS)) {
686 /*
687 * Transmission succeeded. The number of retries is
688 * mcs - real_mcs
689 */
690 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
691 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
692 } else {
693 /*
694 * Transmission failed. The number of retries is
695 * always 7 in this case (for a total number of 8
696 * frames sent).
697 */
698 __set_bit(TXDONE_FAILURE, &txdesc.flags);
699 txdesc.retry = rt2x00dev->long_retry;
700 }
701
702 /*
703 * the frame was retried at least once
704 * -> hw used fallback rates
705 */
706 if (txdesc.retry)
707 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
708
709 rt2x00lib_txdone(entry, &txdesc);
710}
711EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
712
633void rt2800_txdone(struct rt2x00_dev *rt2x00dev) 713void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
634{ 714{
635 struct data_queue *queue; 715 struct data_queue *queue;
636 struct queue_entry *entry; 716 struct queue_entry *entry;
637 __le32 *txwi;
638 struct txdone_entry_desc txdesc;
639 u32 word;
640 u32 reg; 717 u32 reg;
641 u16 mcs, real_mcs;
642 u8 pid; 718 u8 pid;
643 int i; 719 int i;
644 720
@@ -660,7 +736,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
660 * Skip this entry when it contains an invalid 736 * Skip this entry when it contains an invalid
661 * queue identication number. 737 * queue identication number.
662 */ 738 */
663 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1; 739 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE);
664 if (pid >= QID_RX) 740 if (pid >= QID_RX)
665 continue; 741 continue;
666 742
@@ -673,7 +749,6 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
673 * order. We first check that the queue is not empty. 749 * order. We first check that the queue is not empty.
674 */ 750 */
675 entry = NULL; 751 entry = NULL;
676 txwi = NULL;
677 while (!rt2x00queue_empty(queue)) { 752 while (!rt2x00queue_empty(queue)) {
678 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 753 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
679 if (rt2800_txdone_entry_check(entry, reg)) 754 if (rt2800_txdone_entry_check(entry, reg))
@@ -683,48 +758,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
683 if (!entry || rt2x00queue_empty(queue)) 758 if (!entry || rt2x00queue_empty(queue))
684 break; 759 break;
685 760
686 761 rt2800_txdone_entry(entry, reg);
687 /*
688 * Obtain the status about this packet.
689 */
690 txdesc.flags = 0;
691 txwi = rt2800_drv_get_txwi(entry);
692 rt2x00_desc_read(txwi, 0, &word);
693 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
694 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
695
696 /*
697 * Ralink has a retry mechanism using a global fallback
698 * table. We setup this fallback table to try the immediate
699 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
700 * always contains the MCS used for the last transmission, be
701 * it successful or not.
702 */
703 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
704 /*
705 * Transmission succeeded. The number of retries is
706 * mcs - real_mcs
707 */
708 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
709 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
710 } else {
711 /*
712 * Transmission failed. The number of retries is
713 * always 7 in this case (for a total number of 8
714 * frames sent).
715 */
716 __set_bit(TXDONE_FAILURE, &txdesc.flags);
717 txdesc.retry = rt2x00dev->long_retry;
718 }
719
720 /*
721 * the frame was retried at least once
722 * -> hw used fallback rates
723 */
724 if (txdesc.retry)
725 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
726
727 rt2x00lib_txdone(entry, &txdesc);
728 } 762 }
729} 763}
730EXPORT_SYMBOL_GPL(rt2800_txdone); 764EXPORT_SYMBOL_GPL(rt2800_txdone);
@@ -1031,8 +1065,12 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
1031 * 1 pairwise key is possible per AID, this means that the AID 1065 * 1 pairwise key is possible per AID, this means that the AID
1032 * equals our hw_key_idx. Make sure the WCID starts _after_ the 1066 * equals our hw_key_idx. Make sure the WCID starts _after_ the
1033 * last possible shared key entry. 1067 * last possible shared key entry.
1068 *
1069 * Since parts of the pairwise key table might be shared with
1070 * the beacon frame buffers 6 & 7 we should only write into the
1071 * first 222 entries.
1034 */ 1072 */
1035 if (crypto->aid > (256 - 32)) 1073 if (crypto->aid > (222 - 32))
1036 return -ENOSPC; 1074 return -ENOSPC;
1037 1075
1038 key->hw_key_idx = 32 + crypto->aid; 1076 key->hw_key_idx = 32 + crypto->aid;
@@ -1159,38 +1197,149 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1159} 1197}
1160EXPORT_SYMBOL_GPL(rt2800_config_intf); 1198EXPORT_SYMBOL_GPL(rt2800_config_intf);
1161 1199
1162void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp) 1200static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev,
1201 struct rt2x00lib_erp *erp)
1163{ 1202{
1203 bool any_sta_nongf = !!(erp->ht_opmode &
1204 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
1205 u8 protection = erp->ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION;
1206 u8 mm20_mode, mm40_mode, gf20_mode, gf40_mode;
1207 u16 mm20_rate, mm40_rate, gf20_rate, gf40_rate;
1164 u32 reg; 1208 u32 reg;
1165 1209
1166 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg); 1210 /* default protection rate for HT20: OFDM 24M */
1167 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1211 mm20_rate = gf20_rate = 0x4004;
1168 !!erp->short_preamble);
1169 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
1170 !!erp->short_preamble);
1171 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1172 1212
1173 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg); 1213 /* default protection rate for HT40: duplicate OFDM 24M */
1174 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 1214 mm40_rate = gf40_rate = 0x4084;
1175 erp->cts_protection ? 2 : 0);
1176 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1177 1215
1178 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 1216 switch (protection) {
1179 erp->basic_rates); 1217 case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
1180 rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); 1218 /*
1219 * All STAs in this BSS are HT20/40 but there might be
1220 * STAs not supporting greenfield mode.
1221 * => Disable protection for HT transmissions.
1222 */
1223 mm20_mode = mm40_mode = gf20_mode = gf40_mode = 0;
1181 1224
1182 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg); 1225 break;
1183 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time); 1226 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
1184 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); 1227 /*
1228 * All STAs in this BSS are HT20 or HT20/40 but there
1229 * might be STAs not supporting greenfield mode.
1230 * => Protect all HT40 transmissions.
1231 */
1232 mm20_mode = gf20_mode = 0;
1233 mm40_mode = gf40_mode = 2;
1185 1234
1186 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg); 1235 break;
1187 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs); 1236 case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
1188 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); 1237 /*
1238 * Nonmember protection:
1239 * According to 802.11n we _should_ protect all
1240 * HT transmissions (but we don't have to).
1241 *
1242 * But if cts_protection is enabled we _shall_ protect
1243 * all HT transmissions using a CCK rate.
1244 *
1245 * And if any station is non GF we _shall_ protect
1246 * GF transmissions.
1247 *
1248 * We decide to protect everything
1249 * -> fall through to mixed mode.
1250 */
1251 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
1252 /*
1253 * Legacy STAs are present
1254 * => Protect all HT transmissions.
1255 */
1256 mm20_mode = mm40_mode = gf20_mode = gf40_mode = 2;
1189 1257
1190 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 1258 /*
1191 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 1259 * If erp protection is needed we have to protect HT
1192 erp->beacon_int * 16); 1260 * transmissions with CCK 11M long preamble.
1193 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1261 */
1262 if (erp->cts_protection) {
1263 /* don't duplicate RTS/CTS in CCK mode */
1264 mm20_rate = mm40_rate = 0x0003;
1265 gf20_rate = gf40_rate = 0x0003;
1266 }
1267 break;
1268 };
1269
1270 /* check for STAs not supporting greenfield mode */
1271 if (any_sta_nongf)
1272 gf20_mode = gf40_mode = 2;
1273
1274 /* Update HT protection config */
1275 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
1276 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, mm20_rate);
1277 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, mm20_mode);
1278 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
1279
1280 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
1281 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, mm40_rate);
1282 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, mm40_mode);
1283 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
1284
1285 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
1286 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, gf20_rate);
1287 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, gf20_mode);
1288 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
1289
1290 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
1291 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, gf40_rate);
1292 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, gf40_mode);
1293 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
1294}
1295
1296void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
1297 u32 changed)
1298{
1299 u32 reg;
1300
1301 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1302 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
1303 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
1304 !!erp->short_preamble);
1305 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
1306 !!erp->short_preamble);
1307 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1308 }
1309
1310 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1311 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
1312 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
1313 erp->cts_protection ? 2 : 0);
1314 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1315 }
1316
1317 if (changed & BSS_CHANGED_BASIC_RATES) {
1318 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
1319 erp->basic_rates);
1320 rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
1321 }
1322
1323 if (changed & BSS_CHANGED_ERP_SLOT) {
1324 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
1325 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME,
1326 erp->slot_time);
1327 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
1328
1329 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
1330 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
1331 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
1332 }
1333
1334 if (changed & BSS_CHANGED_BEACON_INT) {
1335 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1336 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
1337 erp->beacon_int * 16);
1338 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1339 }
1340
1341 if (changed & BSS_CHANGED_HT)
1342 rt2800_config_ht_opmode(rt2x00dev, erp);
1194} 1343}
1195EXPORT_SYMBOL_GPL(rt2800_config_erp); 1344EXPORT_SYMBOL_GPL(rt2800_config_erp);
1196 1345
@@ -1895,8 +2044,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1895 2044
1896 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg); 2045 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
1897 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084); 2046 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
1898 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 2047 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
1899 !rt2x00_is_usb(rt2x00dev));
1900 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1); 2048 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
1901 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 2049 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1902 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 2050 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
@@ -2053,6 +2201,14 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2053 rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg); 2201 rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg);
2054 2202
2055 /* 2203 /*
2204 * Do not force the BA window size, we use the TXWI to set it
2205 */
2206 rt2800_register_read(rt2x00dev, AMPDU_BA_WINSIZE, &reg);
2207 rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE, 0);
2208 rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE, 0);
2209 rt2800_register_write(rt2x00dev, AMPDU_BA_WINSIZE, reg);
2210
2211 /*
2056 * We must clear the error counters. 2212 * We must clear the error counters.
2057 * These registers are cleared on read, 2213 * These registers are cleared on read,
2058 * so we may pass a useless variable to store the value. 2214 * so we may pass a useless variable to store the value.
@@ -3036,11 +3192,20 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3036 * Initialize all hw fields. 3192 * Initialize all hw fields.
3037 */ 3193 */
3038 rt2x00dev->hw->flags = 3194 rt2x00dev->hw->flags =
3039 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
3040 IEEE80211_HW_SIGNAL_DBM | 3195 IEEE80211_HW_SIGNAL_DBM |
3041 IEEE80211_HW_SUPPORTS_PS | 3196 IEEE80211_HW_SUPPORTS_PS |
3042 IEEE80211_HW_PS_NULLFUNC_STACK | 3197 IEEE80211_HW_PS_NULLFUNC_STACK |
3043 IEEE80211_HW_AMPDU_AGGREGATION; 3198 IEEE80211_HW_AMPDU_AGGREGATION;
3199 /*
3200 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
3201 * unless we are capable of sending the buffered frames out after the
3202 * DTIM transmission using rt2x00lib_beacondone. This will send out
3203 * multicast and broadcast traffic immediately instead of buffering it
3204 * infinitly and thus dropping it after some time.
3205 */
3206 if (!rt2x00_is_usb(rt2x00dev))
3207 rt2x00dev->hw->flags |=
3208 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
3044 3209
3045 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); 3210 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
3046 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 3211 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -3051,12 +3216,13 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3051 * As rt2800 has a global fallback table we cannot specify 3216 * As rt2800 has a global fallback table we cannot specify
3052 * more then one tx rate per frame but since the hw will 3217 * more then one tx rate per frame but since the hw will
3053 * try several rates (based on the fallback table) we should 3218 * try several rates (based on the fallback table) we should
3054 * still initialize max_rates to the maximum number of rates 3219 * initialize max_report_rates to the maximum number of rates
3055 * we are going to try. Otherwise mac80211 will truncate our 3220 * we are going to try. Otherwise mac80211 will truncate our
3056 * reported tx rates and the rc algortihm will end up with 3221 * reported tx rates and the rc algortihm will end up with
3057 * incorrect data. 3222 * incorrect data.
3058 */ 3223 */
3059 rt2x00dev->hw->max_rates = 7; 3224 rt2x00dev->hw->max_rates = 1;
3225 rt2x00dev->hw->max_report_rates = 7;
3060 rt2x00dev->hw->max_rate_tries = 1; 3226 rt2x00dev->hw->max_rate_tries = 1;
3061 3227
3062 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); 3228 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
@@ -3313,8 +3479,12 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3313 switch (action) { 3479 switch (action) {
3314 case IEEE80211_AMPDU_RX_START: 3480 case IEEE80211_AMPDU_RX_START:
3315 case IEEE80211_AMPDU_RX_STOP: 3481 case IEEE80211_AMPDU_RX_STOP:
3316 /* we don't support RX aggregation yet */ 3482 /*
3317 ret = -ENOTSUPP; 3483 * The hw itself takes care of setting up BlockAck mechanisms.
3484 * So, we only have to allow mac80211 to nagotiate a BlockAck
3485 * agreement. Once that is done, the hw will BlockAck incoming
3486 * AMPDUs without further setup.
3487 */
3318 break; 3488 break;
3319 case IEEE80211_AMPDU_TX_START: 3489 case IEEE80211_AMPDU_TX_START:
3320 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3490 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 986229c06c19..81cbc92e7857 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -153,6 +153,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
153void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc); 153void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *txdesc);
154 154
155void rt2800_txdone(struct rt2x00_dev *rt2x00dev); 155void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
156void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
156 157
157void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); 158void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
158 159
@@ -169,7 +170,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
169 const unsigned int filter_flags); 170 const unsigned int filter_flags);
170void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, 171void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
171 struct rt2x00intf_conf *conf, const unsigned int flags); 172 struct rt2x00intf_conf *conf, const unsigned int flags);
172void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp); 173void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp,
174 u32 changed);
173void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant); 175void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant);
174void rt2800_config(struct rt2x00_dev *rt2x00dev, 176void rt2800_config(struct rt2x00_dev *rt2x00dev,
175 struct rt2x00lib_conf *libconf, 177 struct rt2x00lib_conf *libconf,
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 2bcb1507e3ac..b26739535986 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -241,6 +241,7 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
241{ 241{
242 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 242 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
243 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 243 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
244 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
244 u32 word; 245 u32 word;
245 246
246 if (entry->queue->qid == QID_RX) { 247 if (entry->queue->qid == QID_RX) {
@@ -251,6 +252,13 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
251 rt2x00_desc_read(entry_priv->desc, 1, &word); 252 rt2x00_desc_read(entry_priv->desc, 1, &word);
252 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0); 253 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
253 rt2x00_desc_write(entry_priv->desc, 1, word); 254 rt2x00_desc_write(entry_priv->desc, 1, word);
255
256 /*
257 * Set RX IDX in register to inform hardware that we have
258 * handled this entry and it is available for reuse again.
259 */
260 rt2800_register_write(rt2x00dev, RX_CRX_IDX,
261 entry->entry_idx);
254 } else { 262 } else {
255 rt2x00_desc_read(entry_priv->desc, 1, &word); 263 rt2x00_desc_read(entry_priv->desc, 1, &word);
256 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1); 264 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
@@ -342,24 +350,24 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
342 } 350 }
343 351
344 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 352 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
345 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask); 353 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
346 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask); 354 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
347 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask); 355 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
348 rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask); 356 rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
349 rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask); 357 rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
350 rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask); 358 rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
351 rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask); 359 rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
352 rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask); 360 rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
353 rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask); 361 rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
354 rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask); 362 rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
355 rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask); 363 rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
356 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask); 364 rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
357 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask); 365 rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
358 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask); 366 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
359 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask); 367 rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
360 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask); 368 rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
361 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask); 369 rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
362 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask); 370 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
363 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 371 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
364} 372}
365 373
@@ -565,7 +573,7 @@ static void rt2800pci_kick_tx_queue(struct data_queue *queue)
565{ 573{
566 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; 574 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
567 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 575 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
568 unsigned int qidx = 0; 576 unsigned int qidx;
569 577
570 if (queue->qid == QID_MGMT) 578 if (queue->qid == QID_MGMT)
571 qidx = 5; 579 qidx = 5;
@@ -599,7 +607,6 @@ static void rt2800pci_kill_tx_queue(struct data_queue *queue)
599static void rt2800pci_fill_rxdone(struct queue_entry *entry, 607static void rt2800pci_fill_rxdone(struct queue_entry *entry,
600 struct rxdone_entry_desc *rxdesc) 608 struct rxdone_entry_desc *rxdesc)
601{ 609{
602 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
603 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 610 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
604 __le32 *rxd = entry_priv->desc; 611 __le32 *rxd = entry_priv->desc;
605 u32 word; 612 u32 word;
@@ -641,12 +648,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
641 * Process the RXWI structure that is at the start of the buffer. 648 * Process the RXWI structure that is at the start of the buffer.
642 */ 649 */
643 rt2800_process_rxwi(entry, rxdesc); 650 rt2800_process_rxwi(entry, rxdesc);
644
645 /*
646 * Set RX IDX in register to inform hardware that we have handled
647 * this entry and it is available for reuse again.
648 */
649 rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
650} 651}
651 652
652/* 653/*
@@ -660,6 +661,63 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
660 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 661 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
661} 662}
662 663
664static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
665{
666 struct data_queue *queue;
667 struct queue_entry *entry;
668 u32 status;
669 u8 qid;
670
671 while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) {
672 /* Now remove the tx status from the FIFO */
673 if (kfifo_out(&rt2x00dev->txstatus_fifo, &status,
674 sizeof(status)) != sizeof(status)) {
675 WARN_ON(1);
676 break;
677 }
678
679 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
680 if (qid >= QID_RX) {
681 /*
682 * Unknown queue, this shouldn't happen. Just drop
683 * this tx status.
684 */
685 WARNING(rt2x00dev, "Got TX status report with "
686 "unexpected pid %u, dropping", qid);
687 break;
688 }
689
690 queue = rt2x00queue_get_queue(rt2x00dev, qid);
691 if (unlikely(queue == NULL)) {
692 /*
693 * The queue is NULL, this shouldn't happen. Stop
694 * processing here and drop the tx status
695 */
696 WARNING(rt2x00dev, "Got TX status for an unavailable "
697 "queue %u, dropping", qid);
698 break;
699 }
700
701 if (rt2x00queue_empty(queue)) {
702 /*
703 * The queue is empty. Stop processing here
704 * and drop the tx status.
705 */
706 WARNING(rt2x00dev, "Got TX status for an empty "
707 "queue %u, dropping", qid);
708 break;
709 }
710
711 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
712 rt2800_txdone_entry(entry, status);
713 }
714}
715
716static void rt2800pci_txstatus_tasklet(unsigned long data)
717{
718 rt2800pci_txdone((struct rt2x00_dev *)data);
719}
720
663static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance) 721static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
664{ 722{
665 struct rt2x00_dev *rt2x00dev = dev_instance; 723 struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -684,13 +742,7 @@ static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
684 rt2x00pci_rxdone(rt2x00dev); 742 rt2x00pci_rxdone(rt2x00dev);
685 743
686 /* 744 /*
687 * 4 - Tx done interrupt. 745 * 4 - Auto wakeup interrupt.
688 */
689 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
690 rt2800_txdone(rt2x00dev);
691
692 /*
693 * 5 - Auto wakeup interrupt.
694 */ 746 */
695 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) 747 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
696 rt2800pci_wakeup(rt2x00dev); 748 rt2800pci_wakeup(rt2x00dev);
@@ -702,10 +754,58 @@ static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
702 return IRQ_HANDLED; 754 return IRQ_HANDLED;
703} 755}
704 756
757static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
758{
759 u32 status;
760 int i;
761
762 /*
763 * The TX_FIFO_STATUS interrupt needs special care. We should
764 * read TX_STA_FIFO but we should do it immediately as otherwise
765 * the register can overflow and we would lose status reports.
766 *
767 * Hence, read the TX_STA_FIFO register and copy all tx status
768 * reports into a kernel FIFO which is handled in the txstatus
769 * tasklet. We use a tasklet to process the tx status reports
770 * because we can schedule the tasklet multiple times (when the
771 * interrupt fires again during tx status processing).
772 *
773 * Furthermore we don't disable the TX_FIFO_STATUS
774 * interrupt here but leave it enabled so that the TX_STA_FIFO
775 * can also be read while the interrupt thread gets executed.
776 *
777 * Since we have only one producer and one consumer we don't
778 * need to lock the kfifo.
779 */
780 for (i = 0; i < TX_ENTRIES; i++) {
781 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
782
783 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
784 break;
785
786 if (kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
787 WARNING(rt2x00dev, "TX status FIFO overrun,"
788 " drop tx status report.\n");
789 break;
790 }
791
792 if (kfifo_in(&rt2x00dev->txstatus_fifo, &status,
793 sizeof(status)) != sizeof(status)) {
794 WARNING(rt2x00dev, "TX status FIFO overrun,"
795 "drop tx status report.\n");
796 break;
797 }
798 }
799
800 /* Schedule the tasklet for processing the tx status. */
801 tasklet_schedule(&rt2x00dev->txstatus_tasklet);
802}
803
705static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) 804static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
706{ 805{
707 struct rt2x00_dev *rt2x00dev = dev_instance; 806 struct rt2x00_dev *rt2x00dev = dev_instance;
708 u32 reg; 807 u32 reg;
808 irqreturn_t ret = IRQ_HANDLED;
709 809
710 /* Read status and ACK all interrupts */ 810 /* Read status and ACK all interrupts */
711 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); 811 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -717,15 +817,38 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
717 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 817 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
718 return IRQ_HANDLED; 818 return IRQ_HANDLED;
719 819
720 /* Store irqvalue for use in the interrupt thread. */ 820 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
721 rt2x00dev->irqvalue[0] = reg; 821 rt2800pci_txstatus_interrupt(rt2x00dev);
722 822
723 /* Disable interrupts, will be enabled again in the interrupt thread. */ 823 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) ||
724 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 824 rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) ||
725 STATE_RADIO_IRQ_OFF_ISR); 825 rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
826 rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
827 /*
828 * All other interrupts are handled in the interrupt thread.
829 * Store irqvalue for use in the interrupt thread.
830 */
831 rt2x00dev->irqvalue[0] = reg;
832
833 /*
834 * Disable interrupts, will be enabled again in the
835 * interrupt thread.
836 */
837 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
838 STATE_RADIO_IRQ_OFF_ISR);
839
840 /*
841 * Leave the TX_FIFO_STATUS interrupt enabled to not lose any
842 * tx status reports.
843 */
844 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
845 rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
846 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
726 847
848 ret = IRQ_WAKE_THREAD;
849 }
727 850
728 return IRQ_WAKE_THREAD; 851 return ret;
729} 852}
730 853
731/* 854/*
@@ -788,6 +911,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
788 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 911 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
789 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 912 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
790 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 913 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
914 __set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags);
791 if (!modparam_nohwcrypt) 915 if (!modparam_nohwcrypt)
792 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 916 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
793 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 917 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
@@ -837,6 +961,7 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
837static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { 961static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
838 .irq_handler = rt2800pci_interrupt, 962 .irq_handler = rt2800pci_interrupt,
839 .irq_handler_thread = rt2800pci_interrupt_thread, 963 .irq_handler_thread = rt2800pci_interrupt_thread,
964 .txstatus_tasklet = rt2800pci_txstatus_tasklet,
840 .probe_hw = rt2800pci_probe_hw, 965 .probe_hw = rt2800pci_probe_hw,
841 .get_firmware_name = rt2800pci_get_firmware_name, 966 .get_firmware_name = rt2800pci_get_firmware_name,
842 .check_firmware = rt2800_check_firmware, 967 .check_firmware = rt2800_check_firmware,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 0ae942cb66df..94fe589acfaa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -36,6 +36,7 @@
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <linux/input-polldev.h> 38#include <linux/input-polldev.h>
39#include <linux/kfifo.h>
39 40
40#include <net/mac80211.h> 41#include <net/mac80211.h>
41 42
@@ -337,6 +338,11 @@ struct link {
337 338
338 /* 339 /*
339 * Work structure for scheduling periodic watchdog monitoring. 340 * Work structure for scheduling periodic watchdog monitoring.
341 * This work must be scheduled on the kernel workqueue, while
342 * all other work structures must be queued on the mac80211
343 * workqueue. This guarantees that the watchdog can schedule
344 * other work structures and wait for their completion in order
345 * to bring the device/driver back into the desired state.
340 */ 346 */
341 struct delayed_work watchdog_work; 347 struct delayed_work watchdog_work;
342}; 348};
@@ -457,6 +463,7 @@ struct rt2x00lib_erp {
457 short eifs; 463 short eifs;
458 464
459 u16 beacon_int; 465 u16 beacon_int;
466 u16 ht_opmode;
460}; 467};
461 468
462/* 469/*
@@ -522,6 +529,11 @@ struct rt2x00lib_ops {
522 irq_handler_t irq_handler_thread; 529 irq_handler_t irq_handler_thread;
523 530
524 /* 531 /*
532 * TX status tasklet handler.
533 */
534 void (*txstatus_tasklet) (unsigned long data);
535
536 /*
525 * Device init handlers. 537 * Device init handlers.
526 */ 538 */
527 int (*probe_hw) (struct rt2x00_dev *rt2x00dev); 539 int (*probe_hw) (struct rt2x00_dev *rt2x00dev);
@@ -596,7 +608,8 @@ struct rt2x00lib_ops {
596#define CONFIG_UPDATE_BSSID ( 1 << 3 ) 608#define CONFIG_UPDATE_BSSID ( 1 << 3 )
597 609
598 void (*config_erp) (struct rt2x00_dev *rt2x00dev, 610 void (*config_erp) (struct rt2x00_dev *rt2x00dev,
599 struct rt2x00lib_erp *erp); 611 struct rt2x00lib_erp *erp,
612 u32 changed);
600 void (*config_ant) (struct rt2x00_dev *rt2x00dev, 613 void (*config_ant) (struct rt2x00_dev *rt2x00dev,
601 struct antenna_setup *ant); 614 struct antenna_setup *ant);
602 void (*config) (struct rt2x00_dev *rt2x00dev, 615 void (*config) (struct rt2x00_dev *rt2x00dev,
@@ -650,6 +663,7 @@ enum rt2x00_flags {
650 DRIVER_REQUIRE_DMA, 663 DRIVER_REQUIRE_DMA,
651 DRIVER_REQUIRE_COPY_IV, 664 DRIVER_REQUIRE_COPY_IV,
652 DRIVER_REQUIRE_L2PAD, 665 DRIVER_REQUIRE_L2PAD,
666 DRIVER_REQUIRE_TXSTATUS_FIFO,
653 667
654 /* 668 /*
655 * Driver features 669 * Driver features
@@ -883,6 +897,16 @@ struct rt2x00_dev {
883 * and interrupt thread routine. 897 * and interrupt thread routine.
884 */ 898 */
885 u32 irqvalue[2]; 899 u32 irqvalue[2];
900
901 /*
902 * FIFO for storing tx status reports between isr and tasklet.
903 */
904 struct kfifo txstatus_fifo;
905
906 /*
907 * Tasklet for processing tx status reports (rt2800pci).
908 */
909 struct tasklet_struct txstatus_tasklet;
886}; 910};
887 911
888/* 912/*
@@ -1017,17 +1041,15 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
1017 1041
1018/** 1042/**
1019 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. 1043 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
1020 * @rt2x00dev: Pointer to &struct rt2x00_dev. 1044 * @entry: Pointer to &struct queue_entry
1021 * @skb: The skb to map.
1022 */ 1045 */
1023void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 1046void rt2x00queue_map_txskb(struct queue_entry *entry);
1024 1047
1025/** 1048/**
1026 * rt2x00queue_unmap_skb - Unmap a skb from DMA. 1049 * rt2x00queue_unmap_skb - Unmap a skb from DMA.
1027 * @rt2x00dev: Pointer to &struct rt2x00_dev. 1050 * @entry: Pointer to &struct queue_entry
1028 * @skb: The skb to unmap.
1029 */ 1051 */
1030void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 1052void rt2x00queue_unmap_skb(struct queue_entry *entry);
1031 1053
1032/** 1054/**
1033 * rt2x00queue_get_queue - Convert queue index to queue pointer 1055 * rt2x00queue_get_queue - Convert queue index to queue pointer
@@ -1074,8 +1096,7 @@ void rt2x00lib_dmadone(struct queue_entry *entry);
1074void rt2x00lib_txdone(struct queue_entry *entry, 1096void rt2x00lib_txdone(struct queue_entry *entry,
1075 struct txdone_entry_desc *txdesc); 1097 struct txdone_entry_desc *txdesc);
1076void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status); 1098void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
1077void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, 1099void rt2x00lib_rxdone(struct queue_entry *entry);
1078 struct queue_entry *entry);
1079 1100
1080/* 1101/*
1081 * mac80211 handlers. 1102 * mac80211 handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 34f34fa7f53a..54ffb5aeb34e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -81,7 +81,8 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
81 81
82void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 82void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
83 struct rt2x00_intf *intf, 83 struct rt2x00_intf *intf,
84 struct ieee80211_bss_conf *bss_conf) 84 struct ieee80211_bss_conf *bss_conf,
85 u32 changed)
85{ 86{
86 struct rt2x00lib_erp erp; 87 struct rt2x00lib_erp erp;
87 88
@@ -102,7 +103,10 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
102 /* Update global beacon interval time, this is needed for PS support */ 103 /* Update global beacon interval time, this is needed for PS support */
103 rt2x00dev->beacon_int = bss_conf->beacon_int; 104 rt2x00dev->beacon_int = bss_conf->beacon_int;
104 105
105 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp); 106 if (changed & BSS_CHANGED_HT)
107 erp.ht_opmode = bss_conf->ht_operation_mode;
108
109 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp, changed);
106} 110}
107 111
108static inline 112static inline
@@ -129,12 +133,12 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
129 */ 133 */
130 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) 134 if (!(ant->flags & ANTENNA_RX_DIVERSITY))
131 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); 135 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
132 else 136 else if(config.rx == ANTENNA_SW_DIVERSITY)
133 config.rx = active->rx; 137 config.rx = active->rx;
134 138
135 if (!(ant->flags & ANTENNA_TX_DIVERSITY)) 139 if (!(ant->flags & ANTENNA_TX_DIVERSITY))
136 config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx); 140 config.tx = rt2x00lib_config_antenna_check(config.tx, def->tx);
137 else 141 else if (config.tx == ANTENNA_SW_DIVERSITY)
138 config.tx = active->tx; 142 config.tx = active->tx;
139 143
140 /* 144 /*
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index b8cf45c4e9f5..c1710b27ba70 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -380,7 +380,7 @@ static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
380 loff_t *offset) 380 loff_t *offset)
381{ 381{
382 struct rt2x00debug_intf *intf = file->private_data; 382 struct rt2x00debug_intf *intf = file->private_data;
383 char *name[] = { "WEP64", "WEP128", "TKIP", "AES" }; 383 static const char * const name[] = { "WEP64", "WEP128", "TKIP", "AES" };
384 char *data; 384 char *data;
385 char *temp; 385 char *temp;
386 size_t size; 386 size_t size;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 053fdd3bd720..5ba79b935f09 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -253,6 +253,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
253 253
254void rt2x00lib_dmadone(struct queue_entry *entry) 254void rt2x00lib_dmadone(struct queue_entry *entry)
255{ 255{
256 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
256 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE); 257 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
257} 258}
258EXPORT_SYMBOL_GPL(rt2x00lib_dmadone); 259EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
@@ -273,7 +274,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
273 /* 274 /*
274 * Unmap the skb. 275 * Unmap the skb.
275 */ 276 */
276 rt2x00queue_unmap_skb(rt2x00dev, entry->skb); 277 rt2x00queue_unmap_skb(entry);
277 278
278 /* 279 /*
279 * Remove the extra tx headroom from the skb. 280 * Remove the extra tx headroom from the skb.
@@ -432,42 +433,50 @@ static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
432 struct ieee80211_supported_band *sband; 433 struct ieee80211_supported_band *sband;
433 const struct rt2x00_rate *rate; 434 const struct rt2x00_rate *rate;
434 unsigned int i; 435 unsigned int i;
435 int signal; 436 int signal = rxdesc->signal;
436 int type; 437 int type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK);
437 438
438 /* 439 switch (rxdesc->rate_mode) {
439 * For non-HT rates the MCS value needs to contain the 440 case RATE_MODE_CCK:
440 * actually used rate modulation (CCK or OFDM). 441 case RATE_MODE_OFDM:
441 */ 442 /*
442 if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS) 443 * For non-HT rates the MCS value needs to contain the
443 signal = RATE_MCS(rxdesc->rate_mode, rxdesc->signal); 444 * actually used rate modulation (CCK or OFDM).
444 else 445 */
445 signal = rxdesc->signal; 446 if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS)
446 447 signal = RATE_MCS(rxdesc->rate_mode, signal);
447 type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK); 448
448 449 sband = &rt2x00dev->bands[rt2x00dev->curr_band];
449 sband = &rt2x00dev->bands[rt2x00dev->curr_band]; 450 for (i = 0; i < sband->n_bitrates; i++) {
450 for (i = 0; i < sband->n_bitrates; i++) { 451 rate = rt2x00_get_rate(sband->bitrates[i].hw_value);
451 rate = rt2x00_get_rate(sband->bitrates[i].hw_value); 452 if (((type == RXDONE_SIGNAL_PLCP) &&
452 453 (rate->plcp == signal)) ||
453 if (((type == RXDONE_SIGNAL_PLCP) && 454 ((type == RXDONE_SIGNAL_BITRATE) &&
454 (rate->plcp == signal)) || 455 (rate->bitrate == signal)) ||
455 ((type == RXDONE_SIGNAL_BITRATE) && 456 ((type == RXDONE_SIGNAL_MCS) &&
456 (rate->bitrate == signal)) || 457 (rate->mcs == signal))) {
457 ((type == RXDONE_SIGNAL_MCS) && 458 return i;
458 (rate->mcs == signal))) { 459 }
459 return i;
460 } 460 }
461 break;
462 case RATE_MODE_HT_MIX:
463 case RATE_MODE_HT_GREENFIELD:
464 if (signal >= 0 && signal <= 76)
465 return signal;
466 break;
467 default:
468 break;
461 } 469 }
462 470
463 WARNING(rt2x00dev, "Frame received with unrecognized signal, " 471 WARNING(rt2x00dev, "Frame received with unrecognized signal, "
464 "signal=0x%.4x, type=%d.\n", signal, type); 472 "mode=0x%.4x, signal=0x%.4x, type=%d.\n",
473 rxdesc->rate_mode, signal, type);
465 return 0; 474 return 0;
466} 475}
467 476
468void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev, 477void rt2x00lib_rxdone(struct queue_entry *entry)
469 struct queue_entry *entry)
470{ 478{
479 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
471 struct rxdone_entry_desc rxdesc; 480 struct rxdone_entry_desc rxdesc;
472 struct sk_buff *skb; 481 struct sk_buff *skb;
473 struct ieee80211_rx_status *rx_status; 482 struct ieee80211_rx_status *rx_status;
@@ -481,14 +490,14 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
481 * Allocate a new sk_buffer. If no new buffer available, drop the 490 * Allocate a new sk_buffer. If no new buffer available, drop the
482 * received frame and reuse the existing buffer. 491 * received frame and reuse the existing buffer.
483 */ 492 */
484 skb = rt2x00queue_alloc_rxskb(rt2x00dev, entry); 493 skb = rt2x00queue_alloc_rxskb(entry);
485 if (!skb) 494 if (!skb)
486 return; 495 goto submit_entry;
487 496
488 /* 497 /*
489 * Unmap the skb. 498 * Unmap the skb.
490 */ 499 */
491 rt2x00queue_unmap_skb(rt2x00dev, entry->skb); 500 rt2x00queue_unmap_skb(entry);
492 501
493 /* 502 /*
494 * Extract the RXD details. 503 * Extract the RXD details.
@@ -523,18 +532,12 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
523 skb_trim(entry->skb, rxdesc.size); 532 skb_trim(entry->skb, rxdesc.size);
524 533
525 /* 534 /*
526 * Check if the frame was received using HT. In that case, 535 * Translate the signal to the correct bitrate index.
527 * the rate is the MCS index and should be passed to mac80211
528 * directly. Otherwise we need to translate the signal to
529 * the correct bitrate index.
530 */ 536 */
531 if (rxdesc.rate_mode == RATE_MODE_CCK || 537 rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc);
532 rxdesc.rate_mode == RATE_MODE_OFDM) { 538 if (rxdesc.rate_mode == RATE_MODE_HT_MIX ||
533 rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc); 539 rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD)
534 } else {
535 rxdesc.flags |= RX_FLAG_HT; 540 rxdesc.flags |= RX_FLAG_HT;
536 rate_idx = rxdesc.signal;
537 }
538 541
539 /* 542 /*
540 * Update extra components 543 * Update extra components
@@ -813,6 +816,30 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
813 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE; 816 rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
814 817
815 /* 818 /*
819 * Allocate tx status FIFO for driver use.
820 */
821 if (test_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags) &&
822 rt2x00dev->ops->lib->txstatus_tasklet) {
823 /*
824 * Allocate txstatus fifo and tasklet, we use a size of 512
825 * for the kfifo which is big enough to store 512/4=128 tx
826 * status reports. In the worst case (tx status for all tx
827 * queues gets reported before we've got a chance to handle
828 * them) 24*4=384 tx status reports need to be cached.
829 */
830 status = kfifo_alloc(&rt2x00dev->txstatus_fifo, 512,
831 GFP_KERNEL);
832 if (status)
833 return status;
834
835 /* tasklet for processing the tx status reports. */
836 tasklet_init(&rt2x00dev->txstatus_tasklet,
837 rt2x00dev->ops->lib->txstatus_tasklet,
838 (unsigned long)rt2x00dev);
839
840 }
841
842 /*
816 * Register HW. 843 * Register HW.
817 */ 844 */
818 status = ieee80211_register_hw(rt2x00dev->hw); 845 status = ieee80211_register_hw(rt2x00dev->hw);
@@ -909,10 +936,8 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
909 936
910 /* Enable the radio */ 937 /* Enable the radio */
911 retval = rt2x00lib_enable_radio(rt2x00dev); 938 retval = rt2x00lib_enable_radio(rt2x00dev);
912 if (retval) { 939 if (retval)
913 rt2x00queue_uninitialize(rt2x00dev);
914 return retval; 940 return retval;
915 }
916 941
917 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); 942 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
918 943
@@ -1028,6 +1053,16 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1028 cancel_work_sync(&rt2x00dev->txdone_work); 1053 cancel_work_sync(&rt2x00dev->txdone_work);
1029 1054
1030 /* 1055 /*
1056 * Free the tx status fifo.
1057 */
1058 kfifo_free(&rt2x00dev->txstatus_fifo);
1059
1060 /*
1061 * Kill the tx status tasklet.
1062 */
1063 tasklet_kill(&rt2x00dev->txstatus_tasklet);
1064
1065 /*
1031 * Uninitialize device. 1066 * Uninitialize device.
1032 */ 1067 */
1033 rt2x00lib_uninitialize(rt2x00dev); 1068 rt2x00lib_uninitialize(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index ad3c7ff4837b..c637bcaec5f8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -60,9 +60,10 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
60 * when using more then one tx stream (>MCS7). 60 * when using more then one tx stream (>MCS7).
61 */ 61 */
62 if (tx_info->control.sta && txdesc->mcs > 7 && 62 if (tx_info->control.sta && txdesc->mcs > 7 &&
63 (tx_info->control.sta->ht_cap.cap & 63 ((tx_info->control.sta->ht_cap.cap &
64 (WLAN_HT_CAP_SM_PS_DYNAMIC << 64 IEEE80211_HT_CAP_SM_PS) >>
65 IEEE80211_HT_CAP_SM_PS_SHIFT))) 65 IEEE80211_HT_CAP_SM_PS_SHIFT) ==
66 WLAN_HT_CAP_SM_PS_DYNAMIC)
66 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); 67 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
67 } else { 68 } else {
68 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); 69 txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
@@ -72,9 +73,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
72 73
73 74
74 /* 75 /*
75 * Convert flags 76 * This frame is eligible for an AMPDU, however, don't aggregate
77 * frames that are intended to probe a specific tx rate.
76 */ 78 */
77 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) 79 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
80 !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
78 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); 81 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
79 82
80 /* 83 /*
@@ -84,7 +87,13 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
84 txdesc->rate_mode = RATE_MODE_HT_MIX; 87 txdesc->rate_mode = RATE_MODE_HT_MIX;
85 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) 88 if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
86 txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; 89 txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
87 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) 90
91 /*
92 * Set 40Mhz mode if necessary (for legacy rates this will
93 * duplicate the frame to both channels).
94 */
95 if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
96 txrate->flags & IEEE80211_TX_RC_DUP_DATA)
88 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); 97 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
89 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) 98 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
90 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); 99 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index dc5c6574aaf4..619da23b7b56 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -86,7 +86,8 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
86 const u8 *mac, const u8 *bssid); 86 const u8 *mac, const u8 *bssid);
87void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 87void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
88 struct rt2x00_intf *intf, 88 struct rt2x00_intf *intf,
89 struct ieee80211_bss_conf *conf); 89 struct ieee80211_bss_conf *conf,
90 u32 changed);
90void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 91void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
91 struct antenna_setup ant); 92 struct antenna_setup ant);
92void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 93void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
@@ -99,18 +100,15 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
99 100
100/** 101/**
101 * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes. 102 * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes.
102 * @rt2x00dev: Pointer to &struct rt2x00_dev. 103 * @entry: The entry for which the skb will be applicable.
103 * @queue: The queue for which the skb will be applicable.
104 */ 104 */
105struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, 105struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry);
106 struct queue_entry *entry);
107 106
108/** 107/**
109 * rt2x00queue_free_skb - free a skb 108 * rt2x00queue_free_skb - free a skb
110 * @rt2x00dev: Pointer to &struct rt2x00_dev. 109 * @entry: The entry for which the skb will be applicable.
111 * @skb: The skb to free.
112 */ 110 */
113void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb); 111void rt2x00queue_free_skb(struct queue_entry *entry);
114 112
115/** 113/**
116 * rt2x00queue_align_frame - Align 802.11 frame to 4-byte boundary 114 * rt2x00queue_align_frame - Align 802.11 frame to 4-byte boundary
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 666cef3f8472..b971d8798ebf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -188,7 +188,6 @@ static void rt2x00lib_antenna_diversity_eval(struct rt2x00_dev *rt2x00dev)
188static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev) 188static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
189{ 189{
190 struct link_ant *ant = &rt2x00dev->link.ant; 190 struct link_ant *ant = &rt2x00dev->link.ant;
191 unsigned int flags = ant->flags;
192 191
193 /* 192 /*
194 * Determine if software diversity is enabled for 193 * Determine if software diversity is enabled for
@@ -196,13 +195,13 @@ static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
196 * Always perform this check since within the link 195 * Always perform this check since within the link
197 * tuner interval the configuration might have changed. 196 * tuner interval the configuration might have changed.
198 */ 197 */
199 flags &= ~ANTENNA_RX_DIVERSITY; 198 ant->flags &= ~ANTENNA_RX_DIVERSITY;
200 flags &= ~ANTENNA_TX_DIVERSITY; 199 ant->flags &= ~ANTENNA_TX_DIVERSITY;
201 200
202 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) 201 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
203 flags |= ANTENNA_RX_DIVERSITY; 202 ant->flags |= ANTENNA_RX_DIVERSITY;
204 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) 203 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
205 flags |= ANTENNA_TX_DIVERSITY; 204 ant->flags |= ANTENNA_TX_DIVERSITY;
206 205
207 if (!(ant->flags & ANTENNA_RX_DIVERSITY) && 206 if (!(ant->flags & ANTENNA_RX_DIVERSITY) &&
208 !(ant->flags & ANTENNA_TX_DIVERSITY)) { 207 !(ant->flags & ANTENNA_TX_DIVERSITY)) {
@@ -210,9 +209,6 @@ static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev)
210 return true; 209 return true;
211 } 210 }
212 211
213 /* Update flags */
214 ant->flags = flags;
215
216 /* 212 /*
217 * If we have only sampled the data over the last period 213 * If we have only sampled the data over the last period
218 * we should now harvest the data. Otherwise just evaluate 214 * we should now harvest the data. Otherwise just evaluate
@@ -240,6 +236,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
240 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 236 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
241 237
242 /* 238 /*
239 * No need to update the stats for !=STA interfaces
240 */
241 if (!rt2x00dev->intf_sta_count)
242 return;
243
244 /*
243 * Frame was received successfully since non-succesfull 245 * Frame was received successfully since non-succesfull
244 * frames would have been dropped by the hardware. 246 * frames would have been dropped by the hardware.
245 */ 247 */
@@ -415,8 +417,7 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
415 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags)) 417 !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
416 return; 418 return;
417 419
418 ieee80211_queue_delayed_work(rt2x00dev->hw, 420 schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
419 &link->watchdog_work, WATCHDOG_INTERVAL);
420} 421}
421 422
422void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) 423void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -440,8 +441,7 @@ static void rt2x00link_watchdog(struct work_struct *work)
440 rt2x00dev->ops->lib->watchdog(rt2x00dev); 441 rt2x00dev->ops->lib->watchdog(rt2x00dev);
441 442
442 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 443 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
443 ieee80211_queue_delayed_work(rt2x00dev->hw, 444 schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
444 &link->watchdog_work, WATCHDOG_INTERVAL);
445} 445}
446 446
447void rt2x00link_register(struct rt2x00_dev *rt2x00dev) 447void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 235e037e6509..c3c206a97d54 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -669,8 +669,10 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
669 * When the erp information has changed, we should perform 669 * When the erp information has changed, we should perform
670 * additional configuration steps. For all other changes we are done. 670 * additional configuration steps. For all other changes we are done.
671 */ 671 */
672 if (changes & ~(BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) 672 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE |
673 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf); 673 BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BASIC_RATES |
674 BSS_CHANGED_BEACON_INT | BSS_CHANGED_HT))
675 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf, changes);
674} 676}
675EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); 677EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
676 678
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 63c2cc408e15..2449d785cf8d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -84,7 +84,7 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
84 /* 84 /*
85 * Send the frame to rt2x00lib for further processing. 85 * Send the frame to rt2x00lib for further processing.
86 */ 86 */
87 rt2x00lib_rxdone(rt2x00dev, entry); 87 rt2x00lib_rxdone(entry);
88 } 88 }
89} 89}
90EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); 90EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index eede99939db9..e360d287defb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,9 +33,9 @@
33#include "rt2x00.h" 33#include "rt2x00.h"
34#include "rt2x00lib.h" 34#include "rt2x00lib.h"
35 35
36struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, 36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
37 struct queue_entry *entry)
38{ 37{
38 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
39 struct sk_buff *skb; 39 struct sk_buff *skb;
40 struct skb_frame_desc *skbdesc; 40 struct skb_frame_desc *skbdesc;
41 unsigned int frame_size; 41 unsigned int frame_size;
@@ -97,41 +97,42 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
97 return skb; 97 return skb;
98} 98}
99 99
100void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) 100void rt2x00queue_map_txskb(struct queue_entry *entry)
101{ 101{
102 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 102 struct device *dev = entry->queue->rt2x00dev->dev;
103 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
103 104
104 skbdesc->skb_dma = 105 skbdesc->skb_dma =
105 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE); 106 dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE);
106 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; 107 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
107} 108}
108EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); 109EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
109 110
110void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) 111void rt2x00queue_unmap_skb(struct queue_entry *entry)
111{ 112{
112 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 113 struct device *dev = entry->queue->rt2x00dev->dev;
114 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
113 115
114 if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { 116 if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
115 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, 117 dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
116 DMA_FROM_DEVICE); 118 DMA_FROM_DEVICE);
117 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; 119 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
118 } 120 } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
119 121 dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len,
120 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
121 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
122 DMA_TO_DEVICE); 122 DMA_TO_DEVICE);
123 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; 123 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
124 } 124 }
125} 125}
126EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); 126EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);
127 127
128void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) 128void rt2x00queue_free_skb(struct queue_entry *entry)
129{ 129{
130 if (!skb) 130 if (!entry->skb)
131 return; 131 return;
132 132
133 rt2x00queue_unmap_skb(rt2x00dev, skb); 133 rt2x00queue_unmap_skb(entry);
134 dev_kfree_skb_any(skb); 134 dev_kfree_skb_any(entry->skb);
135 entry->skb = NULL;
135} 136}
136 137
137void rt2x00queue_align_frame(struct sk_buff *skb) 138void rt2x00queue_align_frame(struct sk_buff *skb)
@@ -440,7 +441,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
440 * Map the skb to DMA. 441 * Map the skb to DMA.
441 */ 442 */
442 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) 443 if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
443 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 444 rt2x00queue_map_txskb(entry);
444 445
445 return 0; 446 return 0;
446} 447}
@@ -491,7 +492,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
491 if (unlikely(rt2x00queue_full(queue))) 492 if (unlikely(rt2x00queue_full(queue)))
492 return -ENOBUFS; 493 return -ENOBUFS;
493 494
494 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) { 495 if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
496 &entry->flags))) {
495 ERROR(queue->rt2x00dev, 497 ERROR(queue->rt2x00dev,
496 "Arrived at non-free entry in the non-full queue %d.\n" 498 "Arrived at non-free entry in the non-full queue %d.\n"
497 "Please file bug report to %s.\n", 499 "Please file bug report to %s.\n",
@@ -586,8 +588,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
586 /* 588 /*
587 * Clean up the beacon skb. 589 * Clean up the beacon skb.
588 */ 590 */
589 rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb); 591 rt2x00queue_free_skb(intf->beacon);
590 intf->beacon->skb = NULL;
591 592
592 if (!enable_beacon) { 593 if (!enable_beacon) {
593 rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue); 594 rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
@@ -828,8 +829,7 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
828 return 0; 829 return 0;
829} 830}
830 831
831static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev, 832static void rt2x00queue_free_skbs(struct data_queue *queue)
832 struct data_queue *queue)
833{ 833{
834 unsigned int i; 834 unsigned int i;
835 835
@@ -837,19 +837,17 @@ static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
837 return; 837 return;
838 838
839 for (i = 0; i < queue->limit; i++) { 839 for (i = 0; i < queue->limit; i++) {
840 if (queue->entries[i].skb) 840 rt2x00queue_free_skb(&queue->entries[i]);
841 rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
842 } 841 }
843} 842}
844 843
845static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev, 844static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
846 struct data_queue *queue)
847{ 845{
848 unsigned int i; 846 unsigned int i;
849 struct sk_buff *skb; 847 struct sk_buff *skb;
850 848
851 for (i = 0; i < queue->limit; i++) { 849 for (i = 0; i < queue->limit; i++) {
852 skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]); 850 skb = rt2x00queue_alloc_rxskb(&queue->entries[i]);
853 if (!skb) 851 if (!skb)
854 return -ENOMEM; 852 return -ENOMEM;
855 queue->entries[i].skb = skb; 853 queue->entries[i].skb = skb;
@@ -884,7 +882,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
884 goto exit; 882 goto exit;
885 } 883 }
886 884
887 status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx); 885 status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx);
888 if (status) 886 if (status)
889 goto exit; 887 goto exit;
890 888
@@ -902,7 +900,7 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
902{ 900{
903 struct data_queue *queue; 901 struct data_queue *queue;
904 902
905 rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx); 903 rt2x00queue_free_skbs(rt2x00dev->rx);
906 904
907 queue_for_each(rt2x00dev, queue) { 905 queue_for_each(rt2x00dev, queue) {
908 kfree(queue->entries); 906 kfree(queue->entries);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 4c5ae3d45625..b3317df7a7d4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -208,7 +208,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
208 struct queue_entry *entry = (struct queue_entry *)urb->context; 208 struct queue_entry *entry = (struct queue_entry *)urb->context;
209 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 209 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
210 210
211 if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 211 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
212 return; 212 return;
213 213
214 /* 214 /*
@@ -220,7 +220,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
220 * Check if the frame was correctly uploaded 220 * Check if the frame was correctly uploaded
221 */ 221 */
222 if (urb->status) 222 if (urb->status)
223 __set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 223 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
224 224
225 /* 225 /*
226 * Schedule the delayed work for reading the TX status 226 * Schedule the delayed work for reading the TX status
@@ -253,7 +253,10 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
253 entry->skb->data, length, 253 entry->skb->data, length,
254 rt2x00usb_interrupt_txdone, entry); 254 rt2x00usb_interrupt_txdone, entry);
255 255
256 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 256 if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) {
257 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
258 rt2x00lib_dmadone(entry);
259 }
257} 260}
258 261
259void rt2x00usb_kick_tx_queue(struct data_queue *queue) 262void rt2x00usb_kick_tx_queue(struct data_queue *queue)
@@ -280,14 +283,6 @@ static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
280 if ((entry->queue->qid == QID_BEACON) && 283 if ((entry->queue->qid == QID_BEACON) &&
281 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))) 284 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
282 usb_kill_urb(bcn_priv->guardian_urb); 285 usb_kill_urb(bcn_priv->guardian_urb);
283
284 /*
285 * We need a short delay here to wait for
286 * the URB to be canceled
287 */
288 do {
289 udelay(100);
290 } while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
291} 286}
292 287
293void rt2x00usb_kill_tx_queue(struct data_queue *queue) 288void rt2x00usb_kill_tx_queue(struct data_queue *queue)
@@ -363,10 +358,12 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
363 struct data_queue *queue; 358 struct data_queue *queue;
364 359
365 tx_queue_for_each(rt2x00dev, queue) { 360 tx_queue_for_each(rt2x00dev, queue) {
366 if (rt2x00queue_dma_timeout(queue)) 361 if (!rt2x00queue_empty(queue)) {
367 rt2x00usb_watchdog_tx_dma(queue); 362 if (rt2x00queue_dma_timeout(queue))
368 if (rt2x00queue_timeout(queue)) 363 rt2x00usb_watchdog_tx_dma(queue);
369 rt2x00usb_watchdog_tx_status(queue); 364 if (rt2x00queue_timeout(queue))
365 rt2x00usb_watchdog_tx_status(queue);
366 }
370 } 367 }
371} 368}
372EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); 369EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
@@ -398,7 +395,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
398 /* 395 /*
399 * Send the frame to rt2x00lib for further processing. 396 * Send the frame to rt2x00lib for further processing.
400 */ 397 */
401 rt2x00lib_rxdone(rt2x00dev, entry); 398 rt2x00lib_rxdone(entry);
402 } 399 }
403} 400}
404 401
@@ -407,7 +404,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
407 struct queue_entry *entry = (struct queue_entry *)urb->context; 404 struct queue_entry *entry = (struct queue_entry *)urb->context;
408 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 405 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
409 406
410 if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 407 if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
411 return; 408 return;
412 409
413 /* 410 /*
@@ -421,7 +418,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
421 * a problem. 418 * a problem.
422 */ 419 */
423 if (urb->actual_length < entry->queue->desc_size || urb->status) 420 if (urb->actual_length < entry->queue->desc_size || urb->status)
424 __set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 421 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
425 422
426 /* 423 /*
427 * Schedule the delayed work for reading the RX status 424 * Schedule the delayed work for reading the RX status
@@ -467,7 +464,10 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
467 rt2x00usb_interrupt_rxdone, entry); 464 rt2x00usb_interrupt_rxdone, entry);
468 465
469 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 466 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
470 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 467 if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) {
468 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
469 rt2x00lib_dmadone(entry);
470 }
471 } 471 }
472} 472}
473EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 473EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
@@ -542,9 +542,9 @@ static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
542 return 0; 542 return 0;
543} 543}
544 544
545static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, 545static int rt2x00usb_alloc_entries(struct data_queue *queue)
546 struct data_queue *queue)
547{ 546{
547 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
548 struct queue_entry_priv_usb *entry_priv; 548 struct queue_entry_priv_usb *entry_priv;
549 struct queue_entry_priv_usb_bcn *bcn_priv; 549 struct queue_entry_priv_usb_bcn *bcn_priv;
550 unsigned int i; 550 unsigned int i;
@@ -561,7 +561,7 @@ static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
561 * no guardian byte was required for the beacon, 561 * no guardian byte was required for the beacon,
562 * then we are done. 562 * then we are done.
563 */ 563 */
564 if (rt2x00dev->bcn != queue || 564 if (queue->qid != QID_BEACON ||
565 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) 565 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
566 return 0; 566 return 0;
567 567
@@ -575,9 +575,9 @@ static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
575 return 0; 575 return 0;
576} 576}
577 577
578static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, 578static void rt2x00usb_free_entries(struct data_queue *queue)
579 struct data_queue *queue)
580{ 579{
580 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
581 struct queue_entry_priv_usb *entry_priv; 581 struct queue_entry_priv_usb *entry_priv;
582 struct queue_entry_priv_usb_bcn *bcn_priv; 582 struct queue_entry_priv_usb_bcn *bcn_priv;
583 unsigned int i; 583 unsigned int i;
@@ -596,7 +596,7 @@ static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
596 * no guardian byte was required for the beacon, 596 * no guardian byte was required for the beacon,
597 * then we are done. 597 * then we are done.
598 */ 598 */
599 if (rt2x00dev->bcn != queue || 599 if (queue->qid != QID_BEACON ||
600 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)) 600 !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
601 return; 601 return;
602 602
@@ -623,7 +623,7 @@ int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
623 * Allocate DMA 623 * Allocate DMA
624 */ 624 */
625 queue_for_each(rt2x00dev, queue) { 625 queue_for_each(rt2x00dev, queue) {
626 status = rt2x00usb_alloc_urb(rt2x00dev, queue); 626 status = rt2x00usb_alloc_entries(queue);
627 if (status) 627 if (status)
628 goto exit; 628 goto exit;
629 } 629 }
@@ -642,7 +642,7 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
642 struct data_queue *queue; 642 struct data_queue *queue;
643 643
644 queue_for_each(rt2x00dev, queue) 644 queue_for_each(rt2x00dev, queue)
645 rt2x00usb_free_urb(rt2x00dev, queue); 645 rt2x00usb_free_entries(queue);
646} 646}
647EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); 647EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
648 648
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 3a7759929190..af548c87f108 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -594,7 +594,8 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
594} 594}
595 595
596static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev, 596static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
597 struct rt2x00lib_erp *erp) 597 struct rt2x00lib_erp *erp,
598 u32 changed)
598{ 599{
599 u32 reg; 600 u32 reg;
600 601
@@ -603,28 +604,36 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
603 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 604 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
604 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 605 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
605 606
606 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); 607 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
607 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); 608 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
608 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 609 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
609 !!erp->short_preamble); 610 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
610 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 611 !!erp->short_preamble);
612 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
613 }
611 614
612 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); 615 if (changed & BSS_CHANGED_BASIC_RATES)
616 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5,
617 erp->basic_rates);
613 618
614 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 619 if (changed & BSS_CHANGED_BEACON_INT) {
615 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 620 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
616 erp->beacon_int * 16); 621 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
617 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 622 erp->beacon_int * 16);
623 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
624 }
618 625
619 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg); 626 if (changed & BSS_CHANGED_ERP_SLOT) {
620 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); 627 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg);
621 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); 628 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
629 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg);
622 630
623 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg); 631 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg);
624 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); 632 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
625 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 633 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
626 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); 634 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
627 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg); 635 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg);
636 }
628} 637}
629 638
630static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, 639static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
@@ -1645,6 +1654,7 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1645 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 1654 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
1646 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask); 1655 rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
1647 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask); 1656 rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
1657 rt2x00_set_field32(&reg, INT_MASK_CSR_BEACON_DONE, mask);
1648 rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask); 1658 rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask);
1649 rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); 1659 rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff);
1650 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 1660 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
@@ -1658,6 +1668,7 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1658 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_5, mask); 1668 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_5, mask);
1659 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask); 1669 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask);
1660 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask); 1670 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
1671 rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
1661 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 1672 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
1662} 1673}
1663 1674
@@ -2106,7 +2117,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2106 "TX status report missed for entry %d\n", 2117 "TX status report missed for entry %d\n",
2107 entry_done->entry_idx); 2118 entry_done->entry_idx);
2108 2119
2109 rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); 2120 rt2x00lib_txdone_noinfo(entry_done, TXDONE_UNKNOWN);
2110 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 2121 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
2111 } 2122 }
2112 2123
@@ -2619,12 +2630,13 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2619 * As rt61 has a global fallback table we cannot specify 2630 * As rt61 has a global fallback table we cannot specify
2620 * more then one tx rate per frame but since the hw will 2631 * more then one tx rate per frame but since the hw will
2621 * try several rates (based on the fallback table) we should 2632 * try several rates (based on the fallback table) we should
2622 * still initialize max_rates to the maximum number of rates 2633 * initialize max_report_rates to the maximum number of rates
2623 * we are going to try. Otherwise mac80211 will truncate our 2634 * we are going to try. Otherwise mac80211 will truncate our
2624 * reported tx rates and the rc algortihm will end up with 2635 * reported tx rates and the rc algortihm will end up with
2625 * incorrect data. 2636 * incorrect data.
2626 */ 2637 */
2627 rt2x00dev->hw->max_rates = 7; 2638 rt2x00dev->hw->max_rates = 1;
2639 rt2x00dev->hw->max_report_rates = 7;
2628 rt2x00dev->hw->max_rate_tries = 1; 2640 rt2x00dev->hw->max_rate_tries = 1;
2629 2641
2630 /* 2642 /*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 87fb2201537b..9be8089317e4 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -545,7 +545,8 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
545} 545}
546 546
547static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev, 547static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
548 struct rt2x00lib_erp *erp) 548 struct rt2x00lib_erp *erp,
549 u32 changed)
549{ 550{
550 u32 reg; 551 u32 reg;
551 552
@@ -554,28 +555,36 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
554 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 555 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
555 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); 556 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
556 557
557 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg); 558 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
558 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); 559 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
559 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 560 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
560 !!erp->short_preamble); 561 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
561 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); 562 !!erp->short_preamble);
563 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
564 }
562 565
563 rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); 566 if (changed & BSS_CHANGED_BASIC_RATES)
567 rt2x00usb_register_write(rt2x00dev, TXRX_CSR5,
568 erp->basic_rates);
564 569
565 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 570 if (changed & BSS_CHANGED_BEACON_INT) {
566 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 571 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
567 erp->beacon_int * 16); 572 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
568 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 573 erp->beacon_int * 16);
574 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
575 }
569 576
570 rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg); 577 if (changed & BSS_CHANGED_ERP_SLOT) {
571 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); 578 rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
572 rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); 579 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
580 rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
573 581
574 rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg); 582 rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg);
575 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); 583 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
576 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 584 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
577 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); 585 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
578 rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg); 586 rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg);
587 }
579} 588}
580 589
581static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, 590static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
@@ -2054,9 +2063,14 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2054 2063
2055 /* 2064 /*
2056 * Initialize all hw fields. 2065 * Initialize all hw fields.
2066 *
2067 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are
2068 * capable of sending the buffered frames out after the DTIM
2069 * transmission using rt2x00lib_beacondone. This will send out
2070 * multicast and broadcast traffic immediately instead of buffering it
2071 * infinitly and thus dropping it after some time.
2057 */ 2072 */
2058 rt2x00dev->hw->flags = 2073 rt2x00dev->hw->flags =
2059 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2060 IEEE80211_HW_SIGNAL_DBM | 2074 IEEE80211_HW_SIGNAL_DBM |
2061 IEEE80211_HW_SUPPORTS_PS | 2075 IEEE80211_HW_SUPPORTS_PS |
2062 IEEE80211_HW_PS_NULLFUNC_STACK; 2076 IEEE80211_HW_PS_NULLFUNC_STACK;
@@ -2356,6 +2370,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2356 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) }, 2370 { USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
2357 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) }, 2371 { USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
2358 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) }, 2372 { USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
2373 { USB_DEVICE(0x0411, 0x0137), USB_DEVICE_DATA(&rt73usb_ops) },
2359 /* CEIVA */ 2374 /* CEIVA */
2360 { USB_DEVICE(0x178d, 0x02be), USB_DEVICE_DATA(&rt73usb_ops) }, 2375 { USB_DEVICE(0x178d, 0x02be), USB_DEVICE_DATA(&rt73usb_ops) },
2361 /* CNet */ 2376 /* CNet */
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 05c6badbe201..707c688da618 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -99,66 +99,19 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
99 } 99 }
100} 100}
101 101
102static void rtl8180_handle_tx(struct ieee80211_hw *dev) 102static void rtl8180_handle_rx(struct ieee80211_hw *dev)
103{ 103{
104 struct rtl8180_priv *priv = dev->priv; 104 struct rtl8180_priv *priv = dev->priv;
105 struct rtl8180_tx_ring *ring; 105 unsigned int count = 32;
106 int prio;
107
108 spin_lock(&priv->lock);
109
110 for (prio = 3; prio >= 0; prio--) {
111 ring = &priv->tx_ring[prio];
112
113 while (skb_queue_len(&ring->queue)) {
114 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
115 struct sk_buff *skb;
116 struct ieee80211_tx_info *info;
117 u32 flags = le32_to_cpu(entry->flags);
118
119 if (flags & RTL818X_TX_DESC_FLAG_OWN)
120 break;
121
122 ring->idx = (ring->idx + 1) % ring->entries;
123 skb = __skb_dequeue(&ring->queue);
124 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
125 skb->len, PCI_DMA_TODEVICE);
126
127 info = IEEE80211_SKB_CB(skb);
128 ieee80211_tx_info_clear_status(info);
129
130 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
131 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
132 info->flags |= IEEE80211_TX_STAT_ACK;
133
134 info->status.rates[0].count = (flags & 0xFF) + 1;
135 info->status.rates[1].idx = -1;
136
137 ieee80211_tx_status(dev, skb);
138 if (ring->entries - skb_queue_len(&ring->queue) == 2)
139 ieee80211_wake_queue(dev, prio);
140 }
141 }
142
143 spin_unlock(&priv->lock);
144}
145
146static int rtl8180_poll(struct ieee80211_hw *dev, int budget)
147{
148 struct rtl8180_priv *priv = dev->priv;
149 unsigned int count = 0;
150 u8 signal, agc, sq; 106 u8 signal, agc, sq;
151 107
152 /* handle pending Tx queue cleanup */ 108 while (count--) {
153 rtl8180_handle_tx(dev);
154
155 while (count++ < budget) {
156 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx]; 109 struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
157 struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; 110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
158 u32 flags = le32_to_cpu(entry->flags); 111 u32 flags = le32_to_cpu(entry->flags);
159 112
160 if (flags & RTL818X_RX_DESC_FLAG_OWN) 113 if (flags & RTL818X_RX_DESC_FLAG_OWN)
161 break; 114 return;
162 115
163 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL | 116 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
164 RTL818X_RX_DESC_FLAG_FOF | 117 RTL818X_RX_DESC_FLAG_FOF |
@@ -198,7 +151,7 @@ static int rtl8180_poll(struct ieee80211_hw *dev, int budget)
198 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 151 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
199 152
200 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 153 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
201 ieee80211_rx(dev, skb); 154 ieee80211_rx_irqsafe(dev, skb);
202 155
203 skb = new_skb; 156 skb = new_skb;
204 priv->rx_buf[priv->rx_idx] = skb; 157 priv->rx_buf[priv->rx_idx] = skb;
@@ -215,16 +168,41 @@ static int rtl8180_poll(struct ieee80211_hw *dev, int budget)
215 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR); 168 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
216 priv->rx_idx = (priv->rx_idx + 1) % 32; 169 priv->rx_idx = (priv->rx_idx + 1) % 32;
217 } 170 }
171}
172
173static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
174{
175 struct rtl8180_priv *priv = dev->priv;
176 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio];
218 177
219 if (count < budget) { 178 while (skb_queue_len(&ring->queue)) {
220 /* disable polling */ 179 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
221 ieee80211_napi_complete(dev); 180 struct sk_buff *skb;
181 struct ieee80211_tx_info *info;
182 u32 flags = le32_to_cpu(entry->flags);
222 183
223 /* enable interrupts */ 184 if (flags & RTL818X_TX_DESC_FLAG_OWN)
224 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); 185 return;
225 } 186
187 ring->idx = (ring->idx + 1) % ring->entries;
188 skb = __skb_dequeue(&ring->queue);
189 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
190 skb->len, PCI_DMA_TODEVICE);
191
192 info = IEEE80211_SKB_CB(skb);
193 ieee80211_tx_info_clear_status(info);
194
195 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
196 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
197 info->flags |= IEEE80211_TX_STAT_ACK;
198
199 info->status.rates[0].count = (flags & 0xFF) + 1;
200 info->status.rates[1].idx = -1;
226 201
227 return count; 202 ieee80211_tx_status_irqsafe(dev, skb);
203 if (ring->entries - skb_queue_len(&ring->queue) == 2)
204 ieee80211_wake_queue(dev, prio);
205 }
228} 206}
229 207
230static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) 208static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
@@ -233,17 +211,31 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
233 struct rtl8180_priv *priv = dev->priv; 211 struct rtl8180_priv *priv = dev->priv;
234 u16 reg; 212 u16 reg;
235 213
214 spin_lock(&priv->lock);
236 reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS); 215 reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS);
237 if (unlikely(reg == 0xFFFF)) 216 if (unlikely(reg == 0xFFFF)) {
217 spin_unlock(&priv->lock);
238 return IRQ_HANDLED; 218 return IRQ_HANDLED;
219 }
239 220
240 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg); 221 rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
241 222
242 /* disable interrupts */ 223 if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR))
243 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 224 rtl8180_handle_tx(dev, 3);
225
226 if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
227 rtl8180_handle_tx(dev, 2);
228
229 if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
230 rtl8180_handle_tx(dev, 1);
244 231
245 /* enable polling */ 232 if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
246 ieee80211_napi_schedule(dev); 233 rtl8180_handle_tx(dev, 0);
234
235 if (reg & (RTL818X_INT_RX_OK | RTL818X_INT_RX_ERR))
236 rtl8180_handle_rx(dev);
237
238 spin_unlock(&priv->lock);
247 239
248 return IRQ_HANDLED; 240 return IRQ_HANDLED;
249} 241}
@@ -255,6 +247,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
255 struct rtl8180_priv *priv = dev->priv; 247 struct rtl8180_priv *priv = dev->priv;
256 struct rtl8180_tx_ring *ring; 248 struct rtl8180_tx_ring *ring;
257 struct rtl8180_tx_desc *entry; 249 struct rtl8180_tx_desc *entry;
250 unsigned long flags;
258 unsigned int idx, prio; 251 unsigned int idx, prio;
259 dma_addr_t mapping; 252 dma_addr_t mapping;
260 u32 tx_flags; 253 u32 tx_flags;
@@ -301,7 +294,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
301 plcp_len |= 1 << 15; 294 plcp_len |= 1 << 15;
302 } 295 }
303 296
304 spin_lock(&priv->lock); 297 spin_lock_irqsave(&priv->lock, flags);
305 298
306 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 299 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
307 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 300 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
@@ -325,7 +318,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
325 if (ring->entries - skb_queue_len(&ring->queue) < 2) 318 if (ring->entries - skb_queue_len(&ring->queue) < 2)
326 ieee80211_stop_queue(dev, prio); 319 ieee80211_stop_queue(dev, prio);
327 320
328 spin_unlock(&priv->lock); 321 spin_unlock_irqrestore(&priv->lock, flags);
329 322
330 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 323 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
331 324
@@ -871,7 +864,6 @@ static const struct ieee80211_ops rtl8180_ops = {
871 .prepare_multicast = rtl8180_prepare_multicast, 864 .prepare_multicast = rtl8180_prepare_multicast,
872 .configure_filter = rtl8180_configure_filter, 865 .configure_filter = rtl8180_configure_filter,
873 .get_tsf = rtl8180_get_tsf, 866 .get_tsf = rtl8180_get_tsf,
874 .napi_poll = rtl8180_poll,
875}; 867};
876 868
877static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom) 869static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -1003,8 +995,6 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1003 dev->queues = 1; 995 dev->queues = 1;
1004 dev->max_signal = 65; 996 dev->max_signal = 65;
1005 997
1006 dev->napi_weight = 64;
1007
1008 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); 998 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
1009 reg &= RTL818X_TX_CONF_HWVER_MASK; 999 reg &= RTL818X_TX_CONF_HWVER_MASK;
1010 switch (reg) { 1000 switch (reg) {
diff --git a/drivers/net/wireless/wl1251/Kconfig b/drivers/net/wireless/wl1251/Kconfig
new file mode 100644
index 000000000000..1fb65849414f
--- /dev/null
+++ b/drivers/net/wireless/wl1251/Kconfig
@@ -0,0 +1,33 @@
1menuconfig WL1251
2 tristate "TI wl1251 driver support"
3 depends on MAC80211 && EXPERIMENTAL && GENERIC_HARDIRQS
4 select FW_LOADER
5 select CRC7
6 ---help---
7 This will enable TI wl1251 driver support. The drivers make
8 use of the mac80211 stack.
9
10 If you choose to build a module, it'll be called wl1251. Say
11 N if unsure.
12
13config WL1251_SPI
14 tristate "TI wl1251 SPI support"
15 depends on WL1251 && SPI_MASTER
16 ---help---
17 This module adds support for the SPI interface of adapters using
18 TI wl1251 chipset. Select this if your platform is using
19 the SPI bus.
20
21 If you choose to build a module, it'll be called wl1251_spi.
22 Say N if unsure.
23
24config WL1251_SDIO
25 tristate "TI wl1251 SDIO support"
26 depends on WL1251 && MMC
27 ---help---
28 This module adds support for the SDIO interface of adapters using
29 TI wl1251 chipset. Select this if your platform is using
30 the SDIO bus.
31
32 If you choose to build a module, it'll be called
33 wl1251_sdio. Say N if unsure.
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/wl1251/Makefile
new file mode 100644
index 000000000000..4fe246824db3
--- /dev/null
+++ b/drivers/net/wireless/wl1251/Makefile
@@ -0,0 +1,6 @@
1wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
2 acx.o boot.o init.o debugfs.o io.o
3
4obj-$(CONFIG_WL1251) += wl1251.o
5obj-$(CONFIG_WL1251_SPI) += spi.o
6obj-$(CONFIG_WL1251_SDIO) += sdio.o
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl1251/acx.c
index 2f8a2ba744dc..64a0214cfb29 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -1,13 +1,13 @@
1#include "wl1251_acx.h" 1#include "acx.h"
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/crc7.h> 5#include <linux/crc7.h>
6 6
7#include "wl1251.h" 7#include "wl1251.h"
8#include "wl1251_reg.h" 8#include "reg.h"
9#include "wl1251_cmd.h" 9#include "cmd.h"
10#include "wl1251_ps.h" 10#include "ps.h"
11 11
12int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod, 12int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod,
13 u8 mgt_rate, u8 mgt_mod) 13 u8 mgt_rate, u8 mgt_mod)
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl1251/acx.h
index c7cc5c1e8a75..e54b21a4f8b1 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -24,7 +24,7 @@
24#define __WL1251_ACX_H__ 24#define __WL1251_ACX_H__
25 25
26#include "wl1251.h" 26#include "wl1251.h"
27#include "wl1251_cmd.h" 27#include "cmd.h"
28 28
29/* Target's information element */ 29/* Target's information element */
30struct acx_header { 30struct acx_header {
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl1251/boot.c
index 468b47b0328a..61572dfa1f60 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl1251/boot.c
@@ -22,12 +22,12 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24 24
25#include "wl1251_reg.h" 25#include "reg.h"
26#include "wl1251_boot.h" 26#include "boot.h"
27#include "wl1251_io.h" 27#include "io.h"
28#include "wl1251_spi.h" 28#include "spi.h"
29#include "wl1251_event.h" 29#include "event.h"
30#include "wl1251_acx.h" 30#include "acx.h"
31 31
32void wl1251_boot_target_enable_interrupts(struct wl1251 *wl) 32void wl1251_boot_target_enable_interrupts(struct wl1251 *wl)
33{ 33{
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.h b/drivers/net/wireless/wl1251/boot.h
index 7661bc5e4662..7661bc5e4662 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.h
+++ b/drivers/net/wireless/wl1251/boot.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 15fb68c6b542..0ade4bd617c0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -1,14 +1,14 @@
1#include "wl1251_cmd.h" 1#include "cmd.h"
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/crc7.h> 5#include <linux/crc7.h>
6 6
7#include "wl1251.h" 7#include "wl1251.h"
8#include "wl1251_reg.h" 8#include "reg.h"
9#include "wl1251_io.h" 9#include "io.h"
10#include "wl1251_ps.h" 10#include "ps.h"
11#include "wl1251_acx.h" 11#include "acx.h"
12 12
13/** 13/**
14 * send command to firmware 14 * send command to firmware
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl1251/cmd.h
index e5c74c631374..e5c74c631374 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl1251/cmd.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl1251/debugfs.c
index 6ffe4cd58561..6e5caaa9f613 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl1251/debugfs.c
@@ -19,14 +19,14 @@
19 * 19 *
20 */ 20 */
21 21
22#include "wl1251_debugfs.h" 22#include "debugfs.h"
23 23
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27#include "wl1251.h" 27#include "wl1251.h"
28#include "wl1251_acx.h" 28#include "acx.h"
29#include "wl1251_ps.h" 29#include "ps.h"
30 30
31/* ms */ 31/* ms */
32#define WL1251_DEBUGFS_STATS_LIFETIME 1000 32#define WL1251_DEBUGFS_STATS_LIFETIME 1000
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.h b/drivers/net/wireless/wl1251/debugfs.h
index b3417c02a218..b3417c02a218 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.h
+++ b/drivers/net/wireless/wl1251/debugfs.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.c b/drivers/net/wireless/wl1251/event.c
index 54223556b308..712372e50a87 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -21,10 +21,10 @@
21 */ 21 */
22 22
23#include "wl1251.h" 23#include "wl1251.h"
24#include "wl1251_reg.h" 24#include "reg.h"
25#include "wl1251_io.h" 25#include "io.h"
26#include "wl1251_event.h" 26#include "event.h"
27#include "wl1251_ps.h" 27#include "ps.h"
28 28
29static int wl1251_event_scan_complete(struct wl1251 *wl, 29static int wl1251_event_scan_complete(struct wl1251 *wl,
30 struct event_mailbox *mbox) 30 struct event_mailbox *mbox)
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.h b/drivers/net/wireless/wl1251/event.h
index 30eb5d150bf7..30eb5d150bf7 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.h
+++ b/drivers/net/wireless/wl1251/event.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl1251/init.c
index c5daec05d9ee..89b43d35473c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl1251/init.c
@@ -23,11 +23,11 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26#include "wl1251_init.h" 26#include "init.h"
27#include "wl12xx_80211.h" 27#include "wl12xx_80211.h"
28#include "wl1251_acx.h" 28#include "acx.h"
29#include "wl1251_cmd.h" 29#include "cmd.h"
30#include "wl1251_reg.h" 30#include "reg.h"
31 31
32int wl1251_hw_init_hwenc_config(struct wl1251 *wl) 32int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
33{ 33{
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl1251/init.h
index 543f17582ead..543f17582ead 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl1251/init.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.c b/drivers/net/wireless/wl1251/io.c
index ad6ca68b303f..cdcadbf6ac2c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.c
+++ b/drivers/net/wireless/wl1251/io.c
@@ -20,8 +20,8 @@
20 */ 20 */
21 21
22#include "wl1251.h" 22#include "wl1251.h"
23#include "wl1251_reg.h" 23#include "reg.h"
24#include "wl1251_io.h" 24#include "io.h"
25 25
26/* FIXME: this is static data nowadays and the table can be removed */ 26/* FIXME: this is static data nowadays and the table can be removed */
27static enum wl12xx_acx_int_reg wl1251_io_reg_table[ACX_REG_TABLE_LEN] = { 27static enum wl12xx_acx_int_reg wl1251_io_reg_table[ACX_REG_TABLE_LEN] = {
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl1251/io.h
index c545e9d5f512..c545e9d5f512 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.h
+++ b/drivers/net/wireless/wl1251/io.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl1251/main.c
index faf221ca3f41..7a8762553cdc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -31,16 +31,16 @@
31 31
32#include "wl1251.h" 32#include "wl1251.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "wl1251_reg.h" 34#include "reg.h"
35#include "wl1251_io.h" 35#include "io.h"
36#include "wl1251_cmd.h" 36#include "cmd.h"
37#include "wl1251_event.h" 37#include "event.h"
38#include "wl1251_tx.h" 38#include "tx.h"
39#include "wl1251_rx.h" 39#include "rx.h"
40#include "wl1251_ps.h" 40#include "ps.h"
41#include "wl1251_init.h" 41#include "init.h"
42#include "wl1251_debugfs.h" 42#include "debugfs.h"
43#include "wl1251_boot.h" 43#include "boot.h"
44 44
45void wl1251_enable_interrupts(struct wl1251 *wl) 45void wl1251_enable_interrupts(struct wl1251 *wl)
46{ 46{
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl1251/ps.c
index 0b997bdfec09..5ed47c8373d2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -19,10 +19,10 @@
19 * 19 *
20 */ 20 */
21 21
22#include "wl1251_reg.h" 22#include "reg.h"
23#include "wl1251_ps.h" 23#include "ps.h"
24#include "wl1251_cmd.h" 24#include "cmd.h"
25#include "wl1251_io.h" 25#include "io.h"
26 26
27/* in ms */ 27/* in ms */
28#define WL1251_WAKEUP_TIMEOUT 100 28#define WL1251_WAKEUP_TIMEOUT 100
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.h b/drivers/net/wireless/wl1251/ps.h
index e5db81fc1dfc..55c3dda75e69 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.h
+++ b/drivers/net/wireless/wl1251/ps.h
@@ -24,7 +24,7 @@
24#define __WL1251_PS_H__ 24#define __WL1251_PS_H__
25 25
26#include "wl1251.h" 26#include "wl1251.h"
27#include "wl1251_acx.h" 27#include "acx.h"
28 28
29int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode); 29int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode);
30void wl1251_ps_elp_sleep(struct wl1251 *wl); 30void wl1251_ps_elp_sleep(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl1251/reg.h
index a5809019c5c1..a5809019c5c1 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl1251/reg.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl1251/rx.c
index 25764592a596..efa53607d5c9 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -25,11 +25,11 @@
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26 26
27#include "wl1251.h" 27#include "wl1251.h"
28#include "wl1251_reg.h" 28#include "reg.h"
29#include "wl1251_io.h" 29#include "io.h"
30#include "wl1251_rx.h" 30#include "rx.h"
31#include "wl1251_cmd.h" 31#include "cmd.h"
32#include "wl1251_acx.h" 32#include "acx.h"
33 33
34static void wl1251_rx_header(struct wl1251 *wl, 34static void wl1251_rx_header(struct wl1251 *wl,
35 struct wl1251_rx_descriptor *desc) 35 struct wl1251_rx_descriptor *desc)
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.h b/drivers/net/wireless/wl1251/rx.h
index 4448f635a4d8..4448f635a4d8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.h
+++ b/drivers/net/wireless/wl1251/rx.h
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl1251/sdio.c
index c0b68b0a9aa8..74ba9ced5393 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -24,7 +24,7 @@
24#include <linux/mmc/sdio_func.h> 24#include <linux/mmc/sdio_func.h>
25#include <linux/mmc/sdio_ids.h> 25#include <linux/mmc/sdio_ids.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/spi/wl12xx.h> 27#include <linux/wl12xx.h>
28#include <linux/irq.h> 28#include <linux/irq.h>
29 29
30#include "wl1251.h" 30#include "wl1251.h"
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl1251/spi.c
index 334ded9881c0..88fa8e69d0d1 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -24,11 +24,11 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/crc7.h> 25#include <linux/crc7.h>
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/spi/wl12xx.h> 27#include <linux/wl12xx.h>
28 28
29#include "wl1251.h" 29#include "wl1251.h"
30#include "wl1251_reg.h" 30#include "reg.h"
31#include "wl1251_spi.h" 31#include "spi.h"
32 32
33static irqreturn_t wl1251_irq(int irq, void *cookie) 33static irqreturn_t wl1251_irq(int irq, void *cookie)
34{ 34{
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.h b/drivers/net/wireless/wl1251/spi.h
index 7dcf3cf7ae40..16d506955cc0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.h
+++ b/drivers/net/wireless/wl1251/spi.h
@@ -23,9 +23,9 @@
23#ifndef __WL1251_SPI_H__ 23#ifndef __WL1251_SPI_H__
24#define __WL1251_SPI_H__ 24#define __WL1251_SPI_H__
25 25
26#include "wl1251_cmd.h" 26#include "cmd.h"
27#include "wl1251_acx.h" 27#include "acx.h"
28#include "wl1251_reg.h" 28#include "reg.h"
29 29
30#define WSPI_CMD_READ 0x40000000 30#define WSPI_CMD_READ 0x40000000
31#define WSPI_CMD_WRITE 0x00000000 31#define WSPI_CMD_WRITE 0x00000000
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl1251/tx.c
index 388492a7f41f..554b4f9a3d3e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl1251/tx.c
@@ -24,10 +24,10 @@
24#include <linux/module.h> 24#include <linux/module.h>
25 25
26#include "wl1251.h" 26#include "wl1251.h"
27#include "wl1251_reg.h" 27#include "reg.h"
28#include "wl1251_tx.h" 28#include "tx.h"
29#include "wl1251_ps.h" 29#include "ps.h"
30#include "wl1251_io.h" 30#include "io.h"
31 31
32static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count) 32static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
33{ 33{
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl1251/tx.h
index 96011e78cd5a..81338d39b43e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl1251/tx.h
@@ -24,7 +24,7 @@
24#define __WL1251_TX_H__ 24#define __WL1251_TX_H__
25 25
26#include <linux/bitops.h> 26#include <linux/bitops.h>
27#include "wl1251_acx.h" 27#include "acx.h"
28 28
29/* 29/*
30 * 30 *
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index e113d4c1fb35..e113d4c1fb35 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/wl1251/wl12xx_80211.h
new file mode 100644
index 000000000000..184628027213
--- /dev/null
+++ b/drivers/net/wireless/wl1251/wl12xx_80211.h
@@ -0,0 +1,156 @@
1#ifndef __WL12XX_80211_H__
2#define __WL12XX_80211_H__
3
4#include <linux/if_ether.h> /* ETH_ALEN */
5
6/* RATES */
7#define IEEE80211_CCK_RATE_1MB 0x02
8#define IEEE80211_CCK_RATE_2MB 0x04
9#define IEEE80211_CCK_RATE_5MB 0x0B
10#define IEEE80211_CCK_RATE_11MB 0x16
11#define IEEE80211_OFDM_RATE_6MB 0x0C
12#define IEEE80211_OFDM_RATE_9MB 0x12
13#define IEEE80211_OFDM_RATE_12MB 0x18
14#define IEEE80211_OFDM_RATE_18MB 0x24
15#define IEEE80211_OFDM_RATE_24MB 0x30
16#define IEEE80211_OFDM_RATE_36MB 0x48
17#define IEEE80211_OFDM_RATE_48MB 0x60
18#define IEEE80211_OFDM_RATE_54MB 0x6C
19#define IEEE80211_BASIC_RATE_MASK 0x80
20
21#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
22#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
23#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
24#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
25#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
26#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
27#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
28#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
29#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
30#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
31#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
32#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
33
34#define IEEE80211_CCK_RATES_MASK 0x0000000F
35#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
36 IEEE80211_CCK_RATE_2MB_MASK)
37#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
38 IEEE80211_CCK_RATE_5MB_MASK | \
39 IEEE80211_CCK_RATE_11MB_MASK)
40
41#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
42#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
43 IEEE80211_OFDM_RATE_12MB_MASK | \
44 IEEE80211_OFDM_RATE_24MB_MASK)
45#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
46 IEEE80211_OFDM_RATE_9MB_MASK | \
47 IEEE80211_OFDM_RATE_18MB_MASK | \
48 IEEE80211_OFDM_RATE_36MB_MASK | \
49 IEEE80211_OFDM_RATE_48MB_MASK | \
50 IEEE80211_OFDM_RATE_54MB_MASK)
51#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
52 IEEE80211_CCK_DEFAULT_RATES_MASK)
53
54
55/* This really should be 8, but not for our firmware */
56#define MAX_SUPPORTED_RATES 32
57#define COUNTRY_STRING_LEN 3
58#define MAX_COUNTRY_TRIPLETS 32
59
60/* Headers */
61struct ieee80211_header {
62 __le16 frame_ctl;
63 __le16 duration_id;
64 u8 da[ETH_ALEN];
65 u8 sa[ETH_ALEN];
66 u8 bssid[ETH_ALEN];
67 __le16 seq_ctl;
68 u8 payload[0];
69} __packed;
70
71struct wl12xx_ie_header {
72 u8 id;
73 u8 len;
74} __packed;
75
76/* IEs */
77
78struct wl12xx_ie_ssid {
79 struct wl12xx_ie_header header;
80 char ssid[IW_ESSID_MAX_SIZE];
81} __packed;
82
83struct wl12xx_ie_rates {
84 struct wl12xx_ie_header header;
85 u8 rates[MAX_SUPPORTED_RATES];
86} __packed;
87
88struct wl12xx_ie_ds_params {
89 struct wl12xx_ie_header header;
90 u8 channel;
91} __packed;
92
93struct country_triplet {
94 u8 channel;
95 u8 num_channels;
96 u8 max_tx_power;
97} __packed;
98
99struct wl12xx_ie_country {
100 struct wl12xx_ie_header header;
101 u8 country_string[COUNTRY_STRING_LEN];
102 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
103} __packed;
104
105
106/* Templates */
107
108struct wl12xx_beacon_template {
109 struct ieee80211_header header;
110 __le32 time_stamp[2];
111 __le16 beacon_interval;
112 __le16 capability;
113 struct wl12xx_ie_ssid ssid;
114 struct wl12xx_ie_rates rates;
115 struct wl12xx_ie_rates ext_rates;
116 struct wl12xx_ie_ds_params ds_params;
117 struct wl12xx_ie_country country;
118} __packed;
119
120struct wl12xx_null_data_template {
121 struct ieee80211_header header;
122} __packed;
123
124struct wl12xx_ps_poll_template {
125 __le16 fc;
126 __le16 aid;
127 u8 bssid[ETH_ALEN];
128 u8 ta[ETH_ALEN];
129} __packed;
130
131struct wl12xx_qos_null_data_template {
132 struct ieee80211_header header;
133 __le16 qos_ctl;
134} __packed;
135
136struct wl12xx_probe_req_template {
137 struct ieee80211_header header;
138 struct wl12xx_ie_ssid ssid;
139 struct wl12xx_ie_rates rates;
140 struct wl12xx_ie_rates ext_rates;
141} __packed;
142
143
144struct wl12xx_probe_resp_template {
145 struct ieee80211_header header;
146 __le32 time_stamp[2];
147 __le16 beacon_interval;
148 __le16 capability;
149 struct wl12xx_ie_ssid ssid;
150 struct wl12xx_ie_rates rates;
151 struct wl12xx_ie_rates ext_rates;
152 struct wl12xx_ie_ds_params ds_params;
153 struct wl12xx_ie_country country;
154} __packed;
155
156#endif
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 2f98058be451..b447559f1db5 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -5,40 +5,6 @@ menuconfig WL12XX
5 This will enable TI wl12xx driver support. The drivers make 5 This will enable TI wl12xx driver support. The drivers make
6 use of the mac80211 stack. 6 use of the mac80211 stack.
7 7
8config WL1251
9 tristate "TI wl1251 support"
10 depends on WL12XX && GENERIC_HARDIRQS
11 select FW_LOADER
12 select CRC7
13 ---help---
14 This module adds support for wireless adapters based on
15 TI wl1251 chipset.
16
17 If you choose to build a module, it'll be called wl1251. Say
18 N if unsure.
19
20config WL1251_SPI
21 tristate "TI wl1251 SPI support"
22 depends on WL1251 && SPI_MASTER
23 ---help---
24 This module adds support for the SPI interface of adapters using
25 TI wl1251 chipset. Select this if your platform is using
26 the SPI bus.
27
28 If you choose to build a module, it'll be called wl1251_spi.
29 Say N if unsure.
30
31config WL1251_SDIO
32 tristate "TI wl1251 SDIO support"
33 depends on WL1251 && MMC
34 ---help---
35 This module adds support for the SDIO interface of adapters using
36 TI wl1251 chipset. Select this if your platform is using
37 the SDIO bus.
38
39 If you choose to build a module, it'll be called
40 wl1251_sdio. Say N if unsure.
41
42config WL1271 8config WL1271
43 tristate "TI wl1271 support" 9 tristate "TI wl1271 support"
44 depends on WL12XX && GENERIC_HARDIRQS 10 depends on WL12XX && GENERIC_HARDIRQS
@@ -74,4 +40,7 @@ config WL1271_SDIO
74 If you choose to build a module, it'll be called 40 If you choose to build a module, it'll be called
75 wl1271_sdio. Say N if unsure. 41 wl1271_sdio. Say N if unsure.
76 42
77 43config WL12XX_PLATFORM_DATA
44 bool
45 depends on WL1271_SDIO != n
46 default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 078b4398ac1f..3a807444b2af 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -1,12 +1,3 @@
1wl1251-objs = wl1251_main.o wl1251_event.o \
2 wl1251_tx.o wl1251_rx.o wl1251_ps.o wl1251_cmd.o \
3 wl1251_acx.o wl1251_boot.o wl1251_init.o \
4 wl1251_debugfs.o wl1251_io.o
5
6obj-$(CONFIG_WL1251) += wl1251.o
7obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
8obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
9
10wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \ 1wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
11 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 2 wl1271_event.o wl1271_tx.o wl1271_rx.o \
12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \ 3 wl1271_ps.o wl1271_acx.o wl1271_boot.o \
@@ -16,3 +7,6 @@ wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
16obj-$(CONFIG_WL1271) += wl1271.o 7obj-$(CONFIG_WL1271) += wl1271.o
17obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o 8obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
18obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o 9obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
10
11# small builtin driver bit
12obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index dd3cee6ea5bb..8a4cd763e5a2 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -117,10 +117,7 @@ enum {
117#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) 117#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
118#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) 118#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
119 119
120/* 120#define WL1271_CIPHER_SUITE_GEM 0x00147201
121 * Enable/disable 802.11a support for WL1273
122 */
123#undef WL1271_80211A_ENABLED
124 121
125#define WL1271_BUSY_WORD_CNT 1 122#define WL1271_BUSY_WORD_CNT 1
126#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32)) 123#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
@@ -133,6 +130,8 @@ enum {
133 130
134#define ACX_TX_DESCRIPTORS 32 131#define ACX_TX_DESCRIPTORS 32
135 132
133#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
134
136enum wl1271_state { 135enum wl1271_state {
137 WL1271_STATE_OFF, 136 WL1271_STATE_OFF,
138 WL1271_STATE_ON, 137 WL1271_STATE_ON,
@@ -301,6 +300,7 @@ struct wl1271_rx_mem_pool_addr {
301struct wl1271_scan { 300struct wl1271_scan {
302 struct cfg80211_scan_request *req; 301 struct cfg80211_scan_request *req;
303 bool *scanned_ch; 302 bool *scanned_ch;
303 bool failed;
304 u8 state; 304 u8 state;
305 u8 ssid[IW_ESSID_MAX_SIZE+1]; 305 u8 ssid[IW_ESSID_MAX_SIZE+1];
306 size_t ssid_len; 306 size_t ssid_len;
@@ -313,7 +313,7 @@ struct wl1271_if_operations {
313 bool fixed); 313 bool fixed);
314 void (*reset)(struct wl1271 *wl); 314 void (*reset)(struct wl1271 *wl);
315 void (*init)(struct wl1271 *wl); 315 void (*init)(struct wl1271 *wl);
316 void (*power)(struct wl1271 *wl, bool enable); 316 int (*power)(struct wl1271 *wl, bool enable);
317 struct device* (*dev)(struct wl1271 *wl); 317 struct device* (*dev)(struct wl1271 *wl);
318 void (*enable_irq)(struct wl1271 *wl); 318 void (*enable_irq)(struct wl1271 *wl);
319 void (*disable_irq)(struct wl1271 *wl); 319 void (*disable_irq)(struct wl1271 *wl);
@@ -330,6 +330,7 @@ struct wl1271 {
330 330
331 void (*set_power)(bool enable); 331 void (*set_power)(bool enable);
332 int irq; 332 int irq;
333 int ref_clock;
333 334
334 spinlock_t wl_lock; 335 spinlock_t wl_lock;
335 336
@@ -349,6 +350,7 @@ struct wl1271 {
349#define WL1271_FLAG_IDLE (10) 350#define WL1271_FLAG_IDLE (10)
350#define WL1271_FLAG_IDLE_REQUESTED (11) 351#define WL1271_FLAG_IDLE_REQUESTED (11)
351#define WL1271_FLAG_PSPOLL_FAILURE (12) 352#define WL1271_FLAG_PSPOLL_FAILURE (12)
353#define WL1271_FLAG_STA_STATE_SENT (13)
352 unsigned long flags; 354 unsigned long flags;
353 355
354 struct wl1271_partition_set part; 356 struct wl1271_partition_set part;
@@ -361,6 +363,7 @@ struct wl1271 {
361 u8 *fw; 363 u8 *fw;
362 size_t fw_len; 364 size_t fw_len;
363 struct wl1271_nvs_file *nvs; 365 struct wl1271_nvs_file *nvs;
366 size_t nvs_len;
364 367
365 s8 hw_pg_ver; 368 s8 hw_pg_ver;
366 369
@@ -407,9 +410,15 @@ struct wl1271 {
407 /* Rx memory pool address */ 410 /* Rx memory pool address */
408 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr; 411 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
409 412
413 /* Intermediate buffer, used for packet aggregation */
414 u8 *aggr_buf;
415
410 /* The target interrupt mask */ 416 /* The target interrupt mask */
411 struct work_struct irq_work; 417 struct work_struct irq_work;
412 418
419 /* Hardware recovery work */
420 struct work_struct recovery_work;
421
413 /* The mbox event mask */ 422 /* The mbox event mask */
414 u32 event_mask; 423 u32 event_mask;
415 424
@@ -418,6 +427,7 @@ struct wl1271 {
418 427
419 /* Are we currently scanning */ 428 /* Are we currently scanning */
420 struct wl1271_scan scan; 429 struct wl1271_scan scan;
430 struct delayed_work scan_complete_work;
421 431
422 /* Our association ID */ 432 /* Our association ID */
423 u16 aid; 433 u16 aid;
@@ -474,6 +484,8 @@ struct wl1271 {
474 484
475 bool sg_enabled; 485 bool sg_enabled;
476 486
487 bool enable_11a;
488
477 struct list_head list; 489 struct list_head list;
478 490
479 /* Most recently reported noise in dBm */ 491 /* Most recently reported noise in dBm */
@@ -497,14 +509,4 @@ int wl1271_plt_stop(struct wl1271 *wl);
497#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */ 509#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
498#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 510#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
499 511
500static inline bool wl1271_11a_enabled(void)
501{
502 /* FIXME: this could be determined based on the NVS-INI file */
503#ifdef WL1271_80211A_ENABLED
504 return true;
505#else
506 return false;
507#endif
508}
509
510#endif 512#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index f03ad088db8b..618993405262 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -86,40 +86,6 @@ out:
86 return ret; 86 return ret;
87} 87}
88 88
89int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len)
90{
91 struct acx_revision *rev;
92 int ret;
93
94 wl1271_debug(DEBUG_ACX, "acx fw rev");
95
96 rev = kzalloc(sizeof(*rev), GFP_KERNEL);
97 if (!rev) {
98 ret = -ENOMEM;
99 goto out;
100 }
101
102 ret = wl1271_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev));
103 if (ret < 0) {
104 wl1271_warning("ACX_FW_REV interrogate failed");
105 goto out;
106 }
107
108 /* be careful with the buffer sizes */
109 strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version)));
110
111 /*
112 * if the firmware version string is exactly
113 * sizeof(rev->fw_version) long or fw_len is less than
114 * sizeof(rev->fw_version) it won't be null terminated
115 */
116 buf[min(len, sizeof(rev->fw_version)) - 1] = '\0';
117
118out:
119 kfree(rev);
120 return ret;
121}
122
123int wl1271_acx_tx_power(struct wl1271 *wl, int power) 89int wl1271_acx_tx_power(struct wl1271 *wl, int power)
124{ 90{
125 struct acx_current_tx_power *acx; 91 struct acx_current_tx_power *acx;
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 4235bc56f750..ebb341d36e8c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -100,35 +100,6 @@ struct acx_error_counter {
100 __le32 seq_num_miss; 100 __le32 seq_num_miss;
101} __packed; 101} __packed;
102 102
103struct acx_revision {
104 struct acx_header header;
105
106 /*
107 * The WiLink firmware version, an ASCII string x.x.x.x,
108 * that uniquely identifies the current firmware.
109 * The left most digit is incremented each time a
110 * significant change is made to the firmware, such as
111 * code redesign or new platform support.
112 * The second digit is incremented when major enhancements
113 * are added or major fixes are made.
114 * The third digit is incremented for each GA release.
115 * The fourth digit is incremented for each build.
116 * The first two digits identify a firmware release version,
117 * in other words, a unique set of features.
118 * The first three digits identify a GA release.
119 */
120 char fw_version[20];
121
122 /*
123 * This 4 byte field specifies the WiLink hardware version.
124 * bits 0 - 15: Reserved.
125 * bits 16 - 23: Version ID - The WiLink version ID
126 * (1 = first spin, 2 = second spin, and so on).
127 * bits 24 - 31: Chip ID - The WiLink chip ID.
128 */
129 __le32 hw_version;
130} __packed;
131
132enum wl1271_psm_mode { 103enum wl1271_psm_mode {
133 /* Active mode */ 104 /* Active mode */
134 WL1271_PSM_CAM = 0, 105 WL1271_PSM_CAM = 0,
@@ -1060,7 +1031,6 @@ enum {
1060 ACX_PEER_HT_CAP = 0x0057, 1031 ACX_PEER_HT_CAP = 0x0057,
1061 ACX_HT_BSS_OPERATION = 0x0058, 1032 ACX_HT_BSS_OPERATION = 0x0058,
1062 ACX_COEX_ACTIVITY = 0x0059, 1033 ACX_COEX_ACTIVITY = 0x0059,
1063 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1064 ACX_SET_DCO_ITRIM_PARAMS = 0x0061, 1034 ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1065 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1035 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1066 DOT11_CUR_TX_PWR = 0x100D, 1036 DOT11_CUR_TX_PWR = 0x100D,
@@ -1077,7 +1047,6 @@ enum {
1077 1047
1078int wl1271_acx_wake_up_conditions(struct wl1271 *wl); 1048int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
1079int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); 1049int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
1080int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len);
1081int wl1271_acx_tx_power(struct wl1271 *wl, int power); 1050int wl1271_acx_tx_power(struct wl1271 *wl, int power);
1082int wl1271_acx_feature_cfg(struct wl1271 *wl); 1051int wl1271_acx_feature_cfg(struct wl1271 *wl);
1083int wl1271_acx_mem_map(struct wl1271 *wl, 1052int wl1271_acx_mem_map(struct wl1271 *wl,
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index f36430b0336d..b91021242098 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -225,6 +225,28 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
225 if (wl->nvs == NULL) 225 if (wl->nvs == NULL)
226 return -ENODEV; 226 return -ENODEV;
227 227
228 /*
229 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
230 * configurations) can be removed when those NVS files stop floating
231 * around.
232 */
233 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
234 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
235 if (wl->nvs->general_params.dual_mode_select)
236 wl->enable_11a = true;
237 }
238
239 if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
240 (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
241 wl->enable_11a)) {
242 wl1271_error("nvs size is not as expected: %zu != %zu",
243 wl->nvs_len, sizeof(struct wl1271_nvs_file));
244 kfree(wl->nvs);
245 wl->nvs = NULL;
246 wl->nvs_len = 0;
247 return -EILSEQ;
248 }
249
228 /* only the first part of the NVS needs to be uploaded */ 250 /* only the first part of the NVS needs to be uploaded */
229 nvs_len = sizeof(wl->nvs->nvs); 251 nvs_len = sizeof(wl->nvs->nvs);
230 nvs_ptr = (u8 *)wl->nvs->nvs; 252 nvs_ptr = (u8 *)wl->nvs->nvs;
@@ -251,8 +273,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
251 burst_len = nvs_ptr[0]; 273 burst_len = nvs_ptr[0];
252 dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8)); 274 dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
253 275
254 /* FIXME: Due to our new wl1271_translate_reg_addr function, 276 /*
255 we need to add the REGISTER_BASE to the destination */ 277 * Due to our new wl1271_translate_reg_addr function,
278 * we need to add the REGISTER_BASE to the destination
279 */
256 dest_addr += REGISTERS_BASE; 280 dest_addr += REGISTERS_BASE;
257 281
258 /* We move our pointer to the data */ 282 /* We move our pointer to the data */
@@ -274,31 +298,21 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
274 298
275 /* 299 /*
276 * We've reached the first zero length, the first NVS table 300 * We've reached the first zero length, the first NVS table
277 * is 7 bytes further. 301 * is located at an aligned offset which is at least 7 bytes further.
278 */ 302 */
279 nvs_ptr += 7; 303 nvs_ptr = (u8 *)wl->nvs->nvs +
304 ALIGN(nvs_ptr - (u8 *)wl->nvs->nvs + 7, 4);
280 nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs; 305 nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs;
281 nvs_len = ALIGN(nvs_len, 4);
282 306
283 /* FIXME: The driver sets the partition here, but this is not needed,
284 since it sets to the same one as currently in use */
285 /* Now we must set the partition correctly */ 307 /* Now we must set the partition correctly */
286 wl1271_set_partition(wl, &part_table[PART_WORK]); 308 wl1271_set_partition(wl, &part_table[PART_WORK]);
287 309
288 /* Copy the NVS tables to a new block to ensure alignment */ 310 /* Copy the NVS tables to a new block to ensure alignment */
289 /* FIXME: We jump 3 more bytes before uploading the NVS. It seems 311 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
290 that our NVS files have three extra zeros here. I'm not sure whether 312 if (!nvs_aligned)
291 the problem is in our NVS generation or we should really jumpt these 313 return -ENOMEM;
292 3 bytes here */
293 nvs_ptr += 3;
294
295 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); if
296 (!nvs_aligned) return -ENOMEM;
297 314
298 /* And finally we upload the NVS tables */ 315 /* And finally we upload the NVS tables */
299 /* FIXME: In wl1271, we upload everything at once.
300 No endianness handling needed here?! The ref driver doesn't do
301 anything about it at this point */
302 wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false); 316 wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
303 317
304 kfree(nvs_aligned); 318 kfree(nvs_aligned);
@@ -457,17 +471,20 @@ int wl1271_boot(struct wl1271 *wl)
457{ 471{
458 int ret = 0; 472 int ret = 0;
459 u32 tmp, clk, pause; 473 u32 tmp, clk, pause;
474 int ref_clock = wl->ref_clock;
460 475
461 wl1271_boot_hw_version(wl); 476 wl1271_boot_hw_version(wl);
462 477
463 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4) 478 if (ref_clock == 0 || ref_clock == 2 || ref_clock == 4)
464 /* ref clk: 19.2/38.4/38.4-XTAL */ 479 /* ref clk: 19.2/38.4/38.4-XTAL */
465 clk = 0x3; 480 clk = 0x3;
466 else if (REF_CLOCK == 1 || REF_CLOCK == 3) 481 else if (ref_clock == 1 || ref_clock == 3)
467 /* ref clk: 26/52 */ 482 /* ref clk: 26/52 */
468 clk = 0x5; 483 clk = 0x5;
484 else
485 return -EINVAL;
469 486
470 if (REF_CLOCK != 0) { 487 if (ref_clock != 0) {
471 u16 val; 488 u16 val;
472 /* Set clock type (open drain) */ 489 /* Set clock type (open drain) */
473 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 490 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -493,10 +510,7 @@ int wl1271_boot(struct wl1271 *wl)
493 510
494 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 511 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
495 512
496 pause &= ~(WU_COUNTER_PAUSE_VAL); /* FIXME: This should probably be 513 pause &= ~(WU_COUNTER_PAUSE_VAL);
497 * WU_COUNTER_PAUSE_VAL instead of
498 * 0x3ff (magic number ). How does
499 * this work?! */
500 pause |= WU_COUNTER_PAUSE_VAL; 514 pause |= WU_COUNTER_PAUSE_VAL;
501 wl1271_write32(wl, WU_COUNTER_PAUSE, pause); 515 wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
502 516
@@ -516,7 +530,7 @@ int wl1271_boot(struct wl1271 *wl)
516 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 530 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
517 531
518 /* 2 */ 532 /* 2 */
519 clk |= (REF_CLOCK << 1) << 4; 533 clk |= (ref_clock << 1) << 4;
520 wl1271_write32(wl, DRPW_SCRATCH_START, clk); 534 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
521 535
522 wl1271_set_partition(wl, &part_table[PART_WORK]); 536 wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -550,7 +564,6 @@ int wl1271_boot(struct wl1271 *wl)
550 if (ret < 0) 564 if (ret < 0)
551 goto out; 565 goto out;
552 566
553 /* FIXME: Need to check whether this is really what we want */
554 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, 567 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
555 WL1271_ACX_ALL_EVENTS_VECTOR); 568 WL1271_ACX_ALL_EVENTS_VECTOR);
556 569
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index f829699d597e..f73b0b15a280 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -46,7 +46,6 @@ struct wl1271_static_data {
46/* delay between retries */ 46/* delay between retries */
47#define INIT_LOOP_DELAY 50 47#define INIT_LOOP_DELAY 50
48 48
49#define REF_CLOCK 2
50#define WU_COUNTER_PAUSE_VAL 0x3FF 49#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4 50#define WELP_ARM_COMMAND_VAL 0x4
52 51
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index ce503ddd5a41..5d3e8485ea4e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -94,6 +94,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
94 status = le16_to_cpu(cmd->status); 94 status = le16_to_cpu(cmd->status);
95 if (status != CMD_STATUS_SUCCESS) { 95 if (status != CMD_STATUS_SUCCESS) {
96 wl1271_error("command execute failure %d", status); 96 wl1271_error("command execute failure %d", status);
97 ieee80211_queue_work(wl->hw, &wl->recovery_work);
97 ret = -EIO; 98 ret = -EIO;
98 } 99 }
99 100
@@ -107,6 +108,8 @@ out:
107int wl1271_cmd_general_parms(struct wl1271 *wl) 108int wl1271_cmd_general_parms(struct wl1271 *wl)
108{ 109{
109 struct wl1271_general_parms_cmd *gen_parms; 110 struct wl1271_general_parms_cmd *gen_parms;
111 struct wl1271_ini_general_params *gp = &wl->nvs->general_params;
112 bool answer = false;
110 int ret; 113 int ret;
111 114
112 if (!wl->nvs) 115 if (!wl->nvs)
@@ -118,13 +121,24 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
118 121
119 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM; 122 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
120 123
121 memcpy(&gen_parms->general_params, &wl->nvs->general_params, 124 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
122 sizeof(struct wl1271_ini_general_params));
123 125
124 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 126 if (gp->tx_bip_fem_auto_detect)
125 if (ret < 0) 127 answer = true;
128
129 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
130 if (ret < 0) {
126 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed"); 131 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
132 goto out;
133 }
134
135 gp->tx_bip_fem_manufacturer =
136 gen_parms->general_params.tx_bip_fem_manufacturer;
137
138 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
139 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
127 140
141out:
128 kfree(gen_parms); 142 kfree(gen_parms);
129 return ret; 143 return ret;
130} 144}
@@ -170,6 +184,39 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
170 return ret; 184 return ret;
171} 185}
172 186
187int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
188{
189 struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
190 struct conf_rf_settings *rf = &wl->conf.rf;
191 int ret;
192
193 if (!wl->nvs)
194 return -ENODEV;
195
196 ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
197 if (!ext_radio_parms)
198 return -ENOMEM;
199
200 ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
201
202 memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
203 rf->tx_per_channel_power_compensation_2,
204 CONF_TX_PWR_COMPENSATION_LEN_2);
205 memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
206 rf->tx_per_channel_power_compensation_5,
207 CONF_TX_PWR_COMPENSATION_LEN_5);
208
209 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
210 ext_radio_parms, sizeof(*ext_radio_parms));
211
212 ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
213 if (ret < 0)
214 wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
215
216 kfree(ext_radio_parms);
217 return ret;
218}
219
173/* 220/*
174 * Poll the mailbox event field until any of the bits in the mask is set or a 221 * Poll the mailbox event field until any of the bits in the mask is set or a
175 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 222 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
@@ -182,8 +229,10 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
182 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); 229 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
183 230
184 do { 231 do {
185 if (time_after(jiffies, timeout)) 232 if (time_after(jiffies, timeout)) {
233 ieee80211_queue_work(wl->hw, &wl->recovery_work);
186 return -ETIMEDOUT; 234 return -ETIMEDOUT;
235 }
187 236
188 msleep(1); 237 msleep(1);
189 238
@@ -390,18 +439,11 @@ out:
390 return ret; 439 return ret;
391} 440}
392 441
393int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send) 442int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send)
394{ 443{
395 struct wl1271_cmd_ps_params *ps_params = NULL; 444 struct wl1271_cmd_ps_params *ps_params = NULL;
396 int ret = 0; 445 int ret = 0;
397 446
398 /* FIXME: this should be in ps.c */
399 ret = wl1271_acx_wake_up_conditions(wl);
400 if (ret < 0) {
401 wl1271_error("couldn't set wake up conditions");
402 goto out;
403 }
404
405 wl1271_debug(DEBUG_CMD, "cmd set ps mode"); 447 wl1271_debug(DEBUG_CMD, "cmd set ps mode");
406 448
407 ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL); 449 ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL);
@@ -412,9 +454,9 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
412 454
413 ps_params->ps_mode = ps_mode; 455 ps_params->ps_mode = ps_mode;
414 ps_params->send_null_data = send; 456 ps_params->send_null_data = send;
415 ps_params->retries = 5; 457 ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries;
416 ps_params->hang_over_period = 1; 458 ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period;
417 ps_params->null_data_rate = cpu_to_le32(wl->basic_rate_set); 459 ps_params->null_data_rate = cpu_to_le32(rates);
418 460
419 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 461 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
420 sizeof(*ps_params), 0); 462 sizeof(*ps_params), 0);
@@ -428,41 +470,6 @@ out:
428 return ret; 470 return ret;
429} 471}
430 472
431int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
432 size_t len)
433{
434 struct cmd_read_write_memory *cmd;
435 int ret = 0;
436
437 wl1271_debug(DEBUG_CMD, "cmd read memory");
438
439 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
440 if (!cmd) {
441 ret = -ENOMEM;
442 goto out;
443 }
444
445 WARN_ON(len > MAX_READ_SIZE);
446 len = min_t(size_t, len, MAX_READ_SIZE);
447
448 cmd->addr = cpu_to_le32(addr);
449 cmd->size = cpu_to_le32(len);
450
451 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd),
452 sizeof(*cmd));
453 if (ret < 0) {
454 wl1271_error("read memory command failed: %d", ret);
455 goto out;
456 }
457
458 /* the read command got in */
459 memcpy(answer, cmd->value, len);
460
461out:
462 kfree(cmd);
463 return ret;
464}
465
466int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 473int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
467 void *buf, size_t buf_len, int index, u32 rates) 474 void *buf, size_t buf_len, int index, u32 rates)
468{ 475{
@@ -523,7 +530,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
523 } 530 }
524 531
525 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0, 532 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
526 WL1271_RATE_AUTOMATIC); 533 wl->basic_rate);
527 534
528out: 535out:
529 dev_kfree_skb(skb); 536 dev_kfree_skb(skb);
@@ -546,7 +553,7 @@ int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
546 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, 553 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
547 skb->data, skb->len, 554 skb->data, skb->len,
548 CMD_TEMPL_KLV_IDX_NULL_DATA, 555 CMD_TEMPL_KLV_IDX_NULL_DATA,
549 WL1271_RATE_AUTOMATIC); 556 wl->basic_rate);
550 557
551out: 558out:
552 dev_kfree_skb(skb); 559 dev_kfree_skb(skb);
@@ -623,7 +630,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
623 630
624 return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template, 631 return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
625 sizeof(template), 0, 632 sizeof(template), 0,
626 WL1271_RATE_AUTOMATIC); 633 wl->basic_rate);
627} 634}
628 635
629int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 636int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
@@ -746,3 +753,31 @@ out_free:
746out: 753out:
747 return ret; 754 return ret;
748} 755}
756
757int wl1271_cmd_set_sta_state(struct wl1271 *wl)
758{
759 struct wl1271_cmd_set_sta_state *cmd;
760 int ret = 0;
761
762 wl1271_debug(DEBUG_CMD, "cmd set sta state");
763
764 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
765 if (!cmd) {
766 ret = -ENOMEM;
767 goto out;
768 }
769
770 cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
771
772 ret = wl1271_cmd_send(wl, CMD_SET_STA_STATE, cmd, sizeof(*cmd), 0);
773 if (ret < 0) {
774 wl1271_error("failed to send set STA state command");
775 goto out_free;
776 }
777
778out_free:
779 kfree(cmd);
780
781out:
782 return ret;
783}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index af577ee8eb02..a0caf4fc37b1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -33,12 +33,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33 size_t res_len); 33 size_t res_len);
34int wl1271_cmd_general_parms(struct wl1271 *wl); 34int wl1271_cmd_general_parms(struct wl1271 *wl);
35int wl1271_cmd_radio_parms(struct wl1271 *wl); 35int wl1271_cmd_radio_parms(struct wl1271 *wl);
36int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
36int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type); 37int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 38int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 40int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
40int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); 41int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send); 42int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 43int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 44 size_t len);
44int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
@@ -55,6 +56,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
55 u8 key_size, const u8 *key, const u8 *addr, 56 u8 key_size, const u8 *key, const u8 *addr,
56 u32 tx_seq_32, u16 tx_seq_16); 57 u32 tx_seq_32, u16 tx_seq_16);
57int wl1271_cmd_disconnect(struct wl1271 *wl); 58int wl1271_cmd_disconnect(struct wl1271 *wl);
59int wl1271_cmd_set_sta_state(struct wl1271 *wl);
58 60
59enum wl1271_commands { 61enum wl1271_commands {
60 CMD_INTERROGATE = 1, /*use this to read information elements*/ 62 CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -160,41 +162,6 @@ enum {
160 MAX_COMMAND_STATUS = 0xff 162 MAX_COMMAND_STATUS = 0xff
161}; 163};
162 164
163
164/*
165 * CMD_READ_MEMORY
166 *
167 * The host issues this command to read the WiLink device memory/registers.
168 *
169 * Note: The Base Band address has special handling (16 bits registers and
170 * addresses). For more information, see the hardware specification.
171 */
172/*
173 * CMD_WRITE_MEMORY
174 *
175 * The host issues this command to write the WiLink device memory/registers.
176 *
177 * The Base Band address has special handling (16 bits registers and
178 * addresses). For more information, see the hardware specification.
179 */
180#define MAX_READ_SIZE 256
181
182struct cmd_read_write_memory {
183 struct wl1271_cmd_header header;
184
185 /* The address of the memory to read from or write to.*/
186 __le32 addr;
187
188 /* The amount of data in bytes to read from or write to the WiLink
189 * device.*/
190 __le32 size;
191
192 /* The actual value read from or written to the Wilink. The source
193 of this field is the Host in WRITE command or the Wilink in READ
194 command. */
195 u8 value[MAX_READ_SIZE];
196} __packed;
197
198#define CMDMBOX_HEADER_LEN 4 165#define CMDMBOX_HEADER_LEN 4
199#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 166#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
200 167
@@ -313,7 +280,7 @@ enum wl1271_cmd_key_type {
313 KEY_WEP = 1, 280 KEY_WEP = 1,
314 KEY_TKIP = 2, 281 KEY_TKIP = 2,
315 KEY_AES = 3, 282 KEY_AES = 3,
316 KEY_GEM = 4 283 KEY_GEM = 4,
317}; 284};
318 285
319/* FIXME: Add description for key-types */ 286/* FIXME: Add description for key-types */
@@ -358,13 +325,14 @@ enum wl1271_channel_tune_bands {
358 WL1271_CHANNEL_TUNE_BAND_4_9 325 WL1271_CHANNEL_TUNE_BAND_4_9
359}; 326};
360 327
361#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 328#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0
362 329
363#define TEST_CMD_P2G_CAL 0x02 330#define TEST_CMD_P2G_CAL 0x02
364#define TEST_CMD_CHANNEL_TUNE 0x0d 331#define TEST_CMD_CHANNEL_TUNE 0x0d
365#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d 332#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d
366#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 333#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
367#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E 334#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
335#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
368 336
369struct wl1271_general_parms_cmd { 337struct wl1271_general_parms_cmd {
370 struct wl1271_cmd_header header; 338 struct wl1271_cmd_header header;
@@ -397,6 +365,16 @@ struct wl1271_radio_parms_cmd {
397 u8 padding3[2]; 365 u8 padding3[2];
398} __packed; 366} __packed;
399 367
368struct wl1271_ext_radio_parms_cmd {
369 struct wl1271_cmd_header header;
370
371 struct wl1271_cmd_test_header test;
372
373 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
374 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
375 u8 padding[3];
376} __packed;
377
400struct wl1271_cmd_cal_channel_tune { 378struct wl1271_cmd_cal_channel_tune {
401 struct wl1271_cmd_header header; 379 struct wl1271_cmd_header header;
402 380
@@ -469,4 +447,13 @@ struct wl1271_cmd_disconnect {
469 u8 padding; 447 u8 padding;
470} __packed; 448} __packed;
471 449
450#define WL1271_CMD_STA_STATE_CONNECTED 1
451
452struct wl1271_cmd_set_sta_state {
453 struct wl1271_cmd_header header;
454
455 u8 state;
456 u8 padding[3];
457} __packed;
458
472#endif /* __WL1271_CMD_H__ */ 459#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 0435ffda8f73..5f78a6cb1433 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -595,7 +595,7 @@ struct conf_tx_ac_category {
595 u16 tx_op_limit; 595 u16 tx_op_limit;
596}; 596};
597 597
598#define CONF_TX_MAX_TID_COUNT 7 598#define CONF_TX_MAX_TID_COUNT 8
599 599
600enum { 600enum {
601 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/ 601 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
@@ -912,6 +912,22 @@ struct conf_conn_settings {
912 u8 psm_entry_retries; 912 u8 psm_entry_retries;
913 913
914 /* 914 /*
915 * Specifies the maximum number of times to try transmit the PSM entry
916 * null-func frame for each PSM entry attempt
917 *
918 * Range 0 - 255
919 */
920 u8 psm_entry_nullfunc_retries;
921
922 /*
923 * Specifies the time to linger in active mode after successfully
924 * transmitting the PSM entry null-func frame.
925 *
926 * Range 0 - 255 TU's
927 */
928 u8 psm_entry_hangover_period;
929
930 /*
915 * 931 *
916 * Specifies the interval of the connection keep-alive null-func 932 * Specifies the interval of the connection keep-alive null-func
917 * frame in ms. 933 * frame in ms.
@@ -1016,6 +1032,64 @@ struct conf_roam_trigger_settings {
1016 u8 avg_weight_snr_data; 1032 u8 avg_weight_snr_data;
1017}; 1033};
1018 1034
1035struct conf_scan_settings {
1036 /*
1037 * The minimum time to wait on each channel for active scans
1038 *
1039 * Range: 0 - 65536 tu
1040 */
1041 u16 min_dwell_time_active;
1042
1043 /*
1044 * The maximum time to wait on each channel for active scans
1045 *
1046 * Range: 0 - 65536 tu
1047 */
1048 u16 max_dwell_time_active;
1049
1050 /*
1051 * The maximum time to wait on each channel for passive scans
1052 *
1053 * Range: 0 - 65536 tu
1054 */
1055 u16 min_dwell_time_passive;
1056
1057 /*
1058 * The maximum time to wait on each channel for passive scans
1059 *
1060 * Range: 0 - 65536 tu
1061 */
1062 u16 max_dwell_time_passive;
1063
1064 /*
1065 * Number of probe requests to transmit on each active scan channel
1066 *
1067 * Range: u8
1068 */
1069 u16 num_probe_reqs;
1070
1071};
1072
1073/* these are number of channels on the band divided by two, rounded up */
1074#define CONF_TX_PWR_COMPENSATION_LEN_2 7
1075#define CONF_TX_PWR_COMPENSATION_LEN_5 18
1076
1077struct conf_rf_settings {
1078 /*
1079 * Per channel power compensation for 2.4GHz
1080 *
1081 * Range: s8
1082 */
1083 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
1084
1085 /*
1086 * Per channel power compensation for 5GHz
1087 *
1088 * Range: s8
1089 */
1090 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
1091};
1092
1019struct conf_drv_settings { 1093struct conf_drv_settings {
1020 struct conf_sg_settings sg; 1094 struct conf_sg_settings sg;
1021 struct conf_rx_settings rx; 1095 struct conf_rx_settings rx;
@@ -1024,6 +1098,8 @@ struct conf_drv_settings {
1024 struct conf_itrim_settings itrim; 1098 struct conf_itrim_settings itrim;
1025 struct conf_pm_config_settings pm_config; 1099 struct conf_pm_config_settings pm_config;
1026 struct conf_roam_trigger_settings roam_trigger; 1100 struct conf_roam_trigger_settings roam_trigger;
1101 struct conf_scan_settings scan;
1102 struct conf_rf_settings rf;
1027}; 1103};
1028 1104
1029#endif 1105#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index 25ce2cd5e3f3..7b3f50382963 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -41,6 +41,9 @@ void wl1271_pspoll_work(struct work_struct *work)
41 41
42 mutex_lock(&wl->mutex); 42 mutex_lock(&wl->mutex);
43 43
44 if (unlikely(wl->state == WL1271_STATE_OFF))
45 goto out;
46
44 if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags)) 47 if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
45 goto out; 48 goto out;
46 49
@@ -52,7 +55,7 @@ void wl1271_pspoll_work(struct work_struct *work)
52 * delivery failure occurred, and no-one changed state since, so 55 * delivery failure occurred, and no-one changed state since, so
53 * we should go back to powersave. 56 * we should go back to powersave.
54 */ 57 */
55 wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, true); 58 wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
56 59
57out: 60out:
58 mutex_unlock(&wl->mutex); 61 mutex_unlock(&wl->mutex);
@@ -70,7 +73,8 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
70 73
71 /* force active mode receive data from the AP */ 74 /* force active mode receive data from the AP */
72 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { 75 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
73 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, true); 76 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
77 wl->basic_rate, true);
74 if (ret < 0) 78 if (ret < 0)
75 return; 79 return;
76 set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags); 80 set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
@@ -91,6 +95,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
91 bool *beacon_loss) 95 bool *beacon_loss)
92{ 96{
93 int ret = 0; 97 int ret = 0;
98 u32 total_retries = wl->conf.conn.psm_entry_retries;
94 99
95 wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status); 100 wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status);
96 101
@@ -104,10 +109,10 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
104 break; 109 break;
105 } 110 }
106 111
107 if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) { 112 if (wl->psm_entry_retry < total_retries) {
108 wl->psm_entry_retry++; 113 wl->psm_entry_retry++;
109 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 114 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
110 true); 115 wl->basic_rate, true);
111 } else { 116 } else {
112 wl1271_info("No ack to nullfunc from AP."); 117 wl1271_info("No ack to nullfunc from AP.");
113 wl->psm_entry_retry = 0; 118 wl->psm_entry_retry = 0;
@@ -143,7 +148,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
143 /* make sure the firmware goes to active mode - the frame to 148 /* make sure the firmware goes to active mode - the frame to
144 be sent next will indicate to the AP, that we are active. */ 149 be sent next will indicate to the AP, that we are active. */
145 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 150 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
146 false); 151 wl->basic_rate, false);
147 break; 152 break;
148 case EVENT_EXIT_POWER_SAVE_SUCCESS: 153 case EVENT_EXIT_POWER_SAVE_SUCCESS:
149 default: 154 default:
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 4447af1557f5..8044bba70ee7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -53,6 +53,7 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
53int wl1271_init_templates_config(struct wl1271 *wl) 53int wl1271_init_templates_config(struct wl1271 *wl)
54{ 54{
55 int ret, i; 55 int ret, i;
56 size_t size;
56 57
57 /* send empty templates for fw memory reservation */ 58 /* send empty templates for fw memory reservation */
58 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, 59 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
@@ -61,14 +62,12 @@ int wl1271_init_templates_config(struct wl1271 *wl)
61 if (ret < 0) 62 if (ret < 0)
62 return ret; 63 return ret;
63 64
64 if (wl1271_11a_enabled()) { 65 size = sizeof(struct wl12xx_probe_req_template);
65 size_t size = sizeof(struct wl12xx_probe_req_template); 66 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
66 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 67 NULL, size, 0,
67 NULL, size, 0, 68 WL1271_RATE_AUTOMATIC);
68 WL1271_RATE_AUTOMATIC); 69 if (ret < 0)
69 if (ret < 0) 70 return ret;
70 return ret;
71 }
72 71
73 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, 72 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
74 sizeof(struct wl12xx_null_data_template), 73 sizeof(struct wl12xx_null_data_template),
@@ -223,6 +222,10 @@ int wl1271_hw_init(struct wl1271 *wl)
223 if (ret < 0) 222 if (ret < 0)
224 return ret; 223 return ret;
225 224
225 ret = wl1271_cmd_ext_radio_parms(wl);
226 if (ret < 0)
227 return ret;
228
226 /* Template settings */ 229 /* Template settings */
227 ret = wl1271_init_templates_config(wl); 230 ret = wl1271_init_templates_config(wl);
228 if (ret < 0) 231 if (ret < 0)
@@ -291,8 +294,16 @@ int wl1271_hw_init(struct wl1271 *wl)
291 if (ret < 0) 294 if (ret < 0)
292 goto out_free_memmap; 295 goto out_free_memmap;
293 296
294 /* Default TID configuration */ 297 /* Default TID/AC configuration */
298 BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
295 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { 299 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
300 conf_ac = &wl->conf.tx.ac_conf[i];
301 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
302 conf_ac->cw_max, conf_ac->aifsn,
303 conf_ac->tx_op_limit);
304 if (ret < 0)
305 goto out_free_memmap;
306
296 conf_tid = &wl->conf.tx.tid_conf[i]; 307 conf_tid = &wl->conf.tx.tid_conf[i];
297 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, 308 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
298 conf_tid->channel_type, 309 conf_tid->channel_type,
@@ -305,16 +316,6 @@ int wl1271_hw_init(struct wl1271 *wl)
305 goto out_free_memmap; 316 goto out_free_memmap;
306 } 317 }
307 318
308 /* Default AC configuration */
309 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
310 conf_ac = &wl->conf.tx.ac_conf[i];
311 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
312 conf_ac->cw_max, conf_ac->aifsn,
313 conf_ac->tx_op_limit);
314 if (ret < 0)
315 goto out_free_memmap;
316 }
317
318 /* Configure TX rate classes */ 319 /* Configure TX rate classes */
319 ret = wl1271_acx_rate_policies(wl); 320 ret = wl1271_acx_rate_policies(wl);
320 if (ret < 0) 321 if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index bc806c74c63a..c1f92e65ded0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -144,10 +144,13 @@ static inline void wl1271_power_off(struct wl1271 *wl)
144 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 144 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
145} 145}
146 146
147static inline void wl1271_power_on(struct wl1271 *wl) 147static inline int wl1271_power_on(struct wl1271 *wl)
148{ 148{
149 wl->if_ops->power(wl, true); 149 int ret = wl->if_ops->power(wl, true);
150 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 150 if (ret == 0)
151 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
152
153 return ret;
151} 154}
152 155
153 156
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 8e55cf8d509d..48a4b9961ae6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -124,28 +124,28 @@ static struct conf_drv_settings default_conf = {
124 }, 124 },
125 .ac_conf_count = 4, 125 .ac_conf_count = 4,
126 .ac_conf = { 126 .ac_conf = {
127 [0] = { 127 [CONF_TX_AC_BE] = {
128 .ac = CONF_TX_AC_BE, 128 .ac = CONF_TX_AC_BE,
129 .cw_min = 15, 129 .cw_min = 15,
130 .cw_max = 63, 130 .cw_max = 63,
131 .aifsn = 3, 131 .aifsn = 3,
132 .tx_op_limit = 0, 132 .tx_op_limit = 0,
133 }, 133 },
134 [1] = { 134 [CONF_TX_AC_BK] = {
135 .ac = CONF_TX_AC_BK, 135 .ac = CONF_TX_AC_BK,
136 .cw_min = 15, 136 .cw_min = 15,
137 .cw_max = 63, 137 .cw_max = 63,
138 .aifsn = 7, 138 .aifsn = 7,
139 .tx_op_limit = 0, 139 .tx_op_limit = 0,
140 }, 140 },
141 [2] = { 141 [CONF_TX_AC_VI] = {
142 .ac = CONF_TX_AC_VI, 142 .ac = CONF_TX_AC_VI,
143 .cw_min = 15, 143 .cw_min = 15,
144 .cw_max = 63, 144 .cw_max = 63,
145 .aifsn = CONF_TX_AIFS_PIFS, 145 .aifsn = CONF_TX_AIFS_PIFS,
146 .tx_op_limit = 3008, 146 .tx_op_limit = 3008,
147 }, 147 },
148 [3] = { 148 [CONF_TX_AC_VO] = {
149 .ac = CONF_TX_AC_VO, 149 .ac = CONF_TX_AC_VO,
150 .cw_min = 15, 150 .cw_min = 15,
151 .cw_max = 63, 151 .cw_max = 63,
@@ -153,64 +153,40 @@ static struct conf_drv_settings default_conf = {
153 .tx_op_limit = 1504, 153 .tx_op_limit = 1504,
154 }, 154 },
155 }, 155 },
156 .tid_conf_count = 7, 156 .tid_conf_count = 4,
157 .tid_conf = { 157 .tid_conf = {
158 [0] = { 158 [CONF_TX_AC_BE] = {
159 .queue_id = 0, 159 .queue_id = CONF_TX_AC_BE,
160 .channel_type = CONF_CHANNEL_TYPE_DCF, 160 .channel_type = CONF_CHANNEL_TYPE_EDCF,
161 .tsid = CONF_TX_AC_BE,
162 .ps_scheme = CONF_PS_SCHEME_LEGACY,
163 .ack_policy = CONF_ACK_POLICY_LEGACY,
164 .apsd_conf = {0, 0},
165 },
166 [1] = {
167 .queue_id = 1,
168 .channel_type = CONF_CHANNEL_TYPE_DCF,
169 .tsid = CONF_TX_AC_BE, 161 .tsid = CONF_TX_AC_BE,
170 .ps_scheme = CONF_PS_SCHEME_LEGACY, 162 .ps_scheme = CONF_PS_SCHEME_LEGACY,
171 .ack_policy = CONF_ACK_POLICY_LEGACY, 163 .ack_policy = CONF_ACK_POLICY_LEGACY,
172 .apsd_conf = {0, 0}, 164 .apsd_conf = {0, 0},
173 }, 165 },
174 [2] = { 166 [CONF_TX_AC_BK] = {
175 .queue_id = 2, 167 .queue_id = CONF_TX_AC_BK,
176 .channel_type = CONF_CHANNEL_TYPE_DCF, 168 .channel_type = CONF_CHANNEL_TYPE_EDCF,
177 .tsid = CONF_TX_AC_BE, 169 .tsid = CONF_TX_AC_BK,
178 .ps_scheme = CONF_PS_SCHEME_LEGACY, 170 .ps_scheme = CONF_PS_SCHEME_LEGACY,
179 .ack_policy = CONF_ACK_POLICY_LEGACY, 171 .ack_policy = CONF_ACK_POLICY_LEGACY,
180 .apsd_conf = {0, 0}, 172 .apsd_conf = {0, 0},
181 }, 173 },
182 [3] = { 174 [CONF_TX_AC_VI] = {
183 .queue_id = 3, 175 .queue_id = CONF_TX_AC_VI,
184 .channel_type = CONF_CHANNEL_TYPE_DCF, 176 .channel_type = CONF_CHANNEL_TYPE_EDCF,
185 .tsid = CONF_TX_AC_BE, 177 .tsid = CONF_TX_AC_VI,
186 .ps_scheme = CONF_PS_SCHEME_LEGACY,
187 .ack_policy = CONF_ACK_POLICY_LEGACY,
188 .apsd_conf = {0, 0},
189 },
190 [4] = {
191 .queue_id = 4,
192 .channel_type = CONF_CHANNEL_TYPE_DCF,
193 .tsid = CONF_TX_AC_BE,
194 .ps_scheme = CONF_PS_SCHEME_LEGACY, 178 .ps_scheme = CONF_PS_SCHEME_LEGACY,
195 .ack_policy = CONF_ACK_POLICY_LEGACY, 179 .ack_policy = CONF_ACK_POLICY_LEGACY,
196 .apsd_conf = {0, 0}, 180 .apsd_conf = {0, 0},
197 }, 181 },
198 [5] = { 182 [CONF_TX_AC_VO] = {
199 .queue_id = 5, 183 .queue_id = CONF_TX_AC_VO,
200 .channel_type = CONF_CHANNEL_TYPE_DCF, 184 .channel_type = CONF_CHANNEL_TYPE_EDCF,
201 .tsid = CONF_TX_AC_BE, 185 .tsid = CONF_TX_AC_VO,
202 .ps_scheme = CONF_PS_SCHEME_LEGACY, 186 .ps_scheme = CONF_PS_SCHEME_LEGACY,
203 .ack_policy = CONF_ACK_POLICY_LEGACY, 187 .ack_policy = CONF_ACK_POLICY_LEGACY,
204 .apsd_conf = {0, 0}, 188 .apsd_conf = {0, 0},
205 }, 189 },
206 [6] = {
207 .queue_id = 6,
208 .channel_type = CONF_CHANNEL_TYPE_DCF,
209 .tsid = CONF_TX_AC_BE,
210 .ps_scheme = CONF_PS_SCHEME_LEGACY,
211 .ack_policy = CONF_ACK_POLICY_LEGACY,
212 .apsd_conf = {0, 0},
213 }
214 }, 190 },
215 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, 191 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
216 .tx_compl_timeout = 700, 192 .tx_compl_timeout = 700,
@@ -238,7 +214,9 @@ static struct conf_drv_settings default_conf = {
238 .ps_poll_recovery_period = 700, 214 .ps_poll_recovery_period = 700,
239 .bet_enable = CONF_BET_MODE_ENABLE, 215 .bet_enable = CONF_BET_MODE_ENABLE,
240 .bet_max_consecutive = 10, 216 .bet_max_consecutive = 10,
241 .psm_entry_retries = 3, 217 .psm_entry_retries = 5,
218 .psm_entry_nullfunc_retries = 3,
219 .psm_entry_hangover_period = 1,
242 .keep_alive_interval = 55000, 220 .keep_alive_interval = 55000,
243 .max_listen_interval = 20, 221 .max_listen_interval = 20,
244 }, 222 },
@@ -251,15 +229,34 @@ static struct conf_drv_settings default_conf = {
251 .host_fast_wakeup_support = false 229 .host_fast_wakeup_support = false
252 }, 230 },
253 .roam_trigger = { 231 .roam_trigger = {
254 /* FIXME: due to firmware bug, must use value 1 for now */
255 .trigger_pacing = 1, 232 .trigger_pacing = 1,
256 .avg_weight_rssi_beacon = 20, 233 .avg_weight_rssi_beacon = 20,
257 .avg_weight_rssi_data = 10, 234 .avg_weight_rssi_data = 10,
258 .avg_weight_snr_beacon = 20, 235 .avg_weight_snr_beacon = 20,
259 .avg_weight_snr_data = 10 236 .avg_weight_snr_data = 10
260 } 237 },
238 .scan = {
239 .min_dwell_time_active = 7500,
240 .max_dwell_time_active = 30000,
241 .min_dwell_time_passive = 30000,
242 .max_dwell_time_passive = 60000,
243 .num_probe_reqs = 2,
244 },
245 .rf = {
246 .tx_per_channel_power_compensation_2 = {
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
248 },
249 .tx_per_channel_power_compensation_5 = {
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253 },
254 },
261}; 255};
262 256
257static void __wl1271_op_remove_interface(struct wl1271 *wl);
258
259
263static void wl1271_device_release(struct device *dev) 260static void wl1271_device_release(struct device *dev)
264{ 261{
265 262
@@ -277,6 +274,67 @@ static struct platform_device wl1271_device = {
277 274
278static LIST_HEAD(wl_list); 275static LIST_HEAD(wl_list);
279 276
277static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
278 void *arg)
279{
280 struct net_device *dev = arg;
281 struct wireless_dev *wdev;
282 struct wiphy *wiphy;
283 struct ieee80211_hw *hw;
284 struct wl1271 *wl;
285 struct wl1271 *wl_temp;
286 int ret = 0;
287
288 /* Check that this notification is for us. */
289 if (what != NETDEV_CHANGE)
290 return NOTIFY_DONE;
291
292 wdev = dev->ieee80211_ptr;
293 if (wdev == NULL)
294 return NOTIFY_DONE;
295
296 wiphy = wdev->wiphy;
297 if (wiphy == NULL)
298 return NOTIFY_DONE;
299
300 hw = wiphy_priv(wiphy);
301 if (hw == NULL)
302 return NOTIFY_DONE;
303
304 wl_temp = hw->priv;
305 list_for_each_entry(wl, &wl_list, list) {
306 if (wl == wl_temp)
307 break;
308 }
309 if (wl != wl_temp)
310 return NOTIFY_DONE;
311
312 mutex_lock(&wl->mutex);
313
314 if (wl->state == WL1271_STATE_OFF)
315 goto out;
316
317 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
318 goto out;
319
320 ret = wl1271_ps_elp_wakeup(wl, false);
321 if (ret < 0)
322 goto out;
323
324 if ((dev->operstate == IF_OPER_UP) &&
325 !test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) {
326 wl1271_cmd_set_sta_state(wl);
327 wl1271_info("Association completed.");
328 }
329
330 wl1271_ps_elp_sleep(wl);
331
332out:
333 mutex_unlock(&wl->mutex);
334
335 return NOTIFY_OK;
336}
337
280static void wl1271_conf_init(struct wl1271 *wl) 338static void wl1271_conf_init(struct wl1271 *wl)
281{ 339{
282 340
@@ -309,6 +367,10 @@ static int wl1271_plt_init(struct wl1271 *wl)
309 if (ret < 0) 367 if (ret < 0)
310 return ret; 368 return ret;
311 369
370 ret = wl1271_cmd_ext_radio_parms(wl);
371 if (ret < 0)
372 return ret;
373
312 ret = wl1271_init_templates_config(wl); 374 ret = wl1271_init_templates_config(wl);
313 if (ret < 0) 375 if (ret < 0)
314 return ret; 376 return ret;
@@ -346,8 +408,16 @@ static int wl1271_plt_init(struct wl1271 *wl)
346 if (ret < 0) 408 if (ret < 0)
347 goto out_free_memmap; 409 goto out_free_memmap;
348 410
349 /* Default TID configuration */ 411 /* Default TID/AC configuration */
412 BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
350 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { 413 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
414 conf_ac = &wl->conf.tx.ac_conf[i];
415 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
416 conf_ac->cw_max, conf_ac->aifsn,
417 conf_ac->tx_op_limit);
418 if (ret < 0)
419 goto out_free_memmap;
420
351 conf_tid = &wl->conf.tx.tid_conf[i]; 421 conf_tid = &wl->conf.tx.tid_conf[i];
352 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, 422 ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
353 conf_tid->channel_type, 423 conf_tid->channel_type,
@@ -360,16 +430,6 @@ static int wl1271_plt_init(struct wl1271 *wl)
360 goto out_free_memmap; 430 goto out_free_memmap;
361 } 431 }
362 432
363 /* Default AC configuration */
364 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
365 conf_ac = &wl->conf.tx.ac_conf[i];
366 ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
367 conf_ac->cw_max, conf_ac->aifsn,
368 conf_ac->tx_op_limit);
369 if (ret < 0)
370 goto out_free_memmap;
371 }
372
373 /* Enable data path */ 433 /* Enable data path */
374 ret = wl1271_cmd_data_path(wl, 1); 434 ret = wl1271_cmd_data_path(wl, 1);
375 if (ret < 0) 435 if (ret < 0)
@@ -562,20 +622,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
562 return ret; 622 return ret;
563 } 623 }
564 624
565 /*
566 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
567 * configurations) can be removed when those NVS files stop floating
568 * around.
569 */
570 if (fw->size != sizeof(struct wl1271_nvs_file) &&
571 (fw->size != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
572 wl1271_11a_enabled())) {
573 wl1271_error("nvs size is not as expected: %zu != %zu",
574 fw->size, sizeof(struct wl1271_nvs_file));
575 ret = -EILSEQ;
576 goto out;
577 }
578
579 wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL); 625 wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
580 626
581 if (!wl->nvs) { 627 if (!wl->nvs) {
@@ -584,12 +630,37 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
584 goto out; 630 goto out;
585 } 631 }
586 632
633 wl->nvs_len = fw->size;
634
587out: 635out:
588 release_firmware(fw); 636 release_firmware(fw);
589 637
590 return ret; 638 return ret;
591} 639}
592 640
641static void wl1271_recovery_work(struct work_struct *work)
642{
643 struct wl1271 *wl =
644 container_of(work, struct wl1271, recovery_work);
645
646 mutex_lock(&wl->mutex);
647
648 if (wl->state != WL1271_STATE_ON)
649 goto out;
650
651 wl1271_info("Hardware recovery in progress.");
652
653 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
654 ieee80211_connection_loss(wl->vif);
655
656 /* reboot the chipset */
657 __wl1271_op_remove_interface(wl);
658 ieee80211_restart_hw(wl->hw);
659
660out:
661 mutex_unlock(&wl->mutex);
662}
663
593static void wl1271_fw_wakeup(struct wl1271 *wl) 664static void wl1271_fw_wakeup(struct wl1271 *wl)
594{ 665{
595 u32 elp_reg; 666 u32 elp_reg;
@@ -610,8 +681,6 @@ static int wl1271_setup(struct wl1271 *wl)
610 return -ENOMEM; 681 return -ENOMEM;
611 } 682 }
612 683
613 INIT_WORK(&wl->irq_work, wl1271_irq_work);
614 INIT_WORK(&wl->tx_work, wl1271_tx_work);
615 return 0; 684 return 0;
616} 685}
617 686
@@ -621,7 +690,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
621 int ret = 0; 690 int ret = 0;
622 691
623 msleep(WL1271_PRE_POWER_ON_SLEEP); 692 msleep(WL1271_PRE_POWER_ON_SLEEP);
624 wl1271_power_on(wl); 693 ret = wl1271_power_on(wl);
694 if (ret < 0)
695 goto out;
625 msleep(WL1271_POWER_ON_SLEEP); 696 msleep(WL1271_POWER_ON_SLEEP);
626 wl1271_io_reset(wl); 697 wl1271_io_reset(wl);
627 wl1271_io_init(wl); 698 wl1271_io_init(wl);
@@ -766,10 +837,12 @@ int wl1271_plt_stop(struct wl1271 *wl)
766out: 837out:
767 mutex_unlock(&wl->mutex); 838 mutex_unlock(&wl->mutex);
768 839
840 cancel_work_sync(&wl->irq_work);
841 cancel_work_sync(&wl->recovery_work);
842
769 return ret; 843 return ret;
770} 844}
771 845
772
773static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 846static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
774{ 847{
775 struct wl1271 *wl = hw->priv; 848 struct wl1271 *wl = hw->priv;
@@ -812,6 +885,10 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
812 return NETDEV_TX_OK; 885 return NETDEV_TX_OK;
813} 886}
814 887
888static struct notifier_block wl1271_dev_notifier = {
889 .notifier_call = wl1271_dev_notify,
890};
891
815static int wl1271_op_start(struct ieee80211_hw *hw) 892static int wl1271_op_start(struct ieee80211_hw *hw)
816{ 893{
817 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 894 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -928,13 +1005,10 @@ out:
928 return ret; 1005 return ret;
929} 1006}
930 1007
931static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1008static void __wl1271_op_remove_interface(struct wl1271 *wl)
932 struct ieee80211_vif *vif)
933{ 1009{
934 struct wl1271 *wl = hw->priv;
935 int i; 1010 int i;
936 1011
937 mutex_lock(&wl->mutex);
938 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 1012 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
939 1013
940 wl1271_info("down"); 1014 wl1271_info("down");
@@ -948,10 +1022,10 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
948 ieee80211_enable_dyn_ps(wl->vif); 1022 ieee80211_enable_dyn_ps(wl->vif);
949 1023
950 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 1024 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
951 ieee80211_scan_completed(wl->hw, true);
952 wl->scan.state = WL1271_SCAN_STATE_IDLE; 1025 wl->scan.state = WL1271_SCAN_STATE_IDLE;
953 kfree(wl->scan.scanned_ch); 1026 kfree(wl->scan.scanned_ch);
954 wl->scan.scanned_ch = NULL; 1027 wl->scan.scanned_ch = NULL;
1028 ieee80211_scan_completed(wl->hw, true);
955 } 1029 }
956 1030
957 wl->state = WL1271_STATE_OFF; 1031 wl->state = WL1271_STATE_OFF;
@@ -960,9 +1034,11 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
960 1034
961 mutex_unlock(&wl->mutex); 1035 mutex_unlock(&wl->mutex);
962 1036
1037 cancel_delayed_work_sync(&wl->scan_complete_work);
963 cancel_work_sync(&wl->irq_work); 1038 cancel_work_sync(&wl->irq_work);
964 cancel_work_sync(&wl->tx_work); 1039 cancel_work_sync(&wl->tx_work);
965 cancel_delayed_work_sync(&wl->pspoll_work); 1040 cancel_delayed_work_sync(&wl->pspoll_work);
1041 cancel_delayed_work_sync(&wl->elp_work);
966 1042
967 mutex_lock(&wl->mutex); 1043 mutex_lock(&wl->mutex);
968 1044
@@ -1004,8 +1080,19 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1004 wl->tx_res_if = NULL; 1080 wl->tx_res_if = NULL;
1005 kfree(wl->target_mem_map); 1081 kfree(wl->target_mem_map);
1006 wl->target_mem_map = NULL; 1082 wl->target_mem_map = NULL;
1083}
1084
1085static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1086 struct ieee80211_vif *vif)
1087{
1088 struct wl1271 *wl = hw->priv;
1007 1089
1090 mutex_lock(&wl->mutex);
1091 WARN_ON(wl->vif != vif);
1092 __wl1271_op_remove_interface(wl);
1008 mutex_unlock(&wl->mutex); 1093 mutex_unlock(&wl->mutex);
1094
1095 cancel_work_sync(&wl->recovery_work);
1009} 1096}
1010 1097
1011static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters) 1098static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
@@ -1287,7 +1374,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1287 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 1374 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1288 wl1271_debug(DEBUG_PSM, "psm enabled"); 1375 wl1271_debug(DEBUG_PSM, "psm enabled");
1289 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 1376 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
1290 true); 1377 wl->basic_rate, true);
1291 } 1378 }
1292 } else if (!(conf->flags & IEEE80211_CONF_PS) && 1379 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
1293 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 1380 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
@@ -1297,7 +1384,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1297 1384
1298 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) 1385 if (test_bit(WL1271_FLAG_PSM, &wl->flags))
1299 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 1386 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
1300 true); 1387 wl->basic_rate, true);
1301 } 1388 }
1302 1389
1303 if (conf->power_level != wl->power_level) { 1390 if (conf->power_level != wl->power_level) {
@@ -1474,6 +1561,11 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1474 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq); 1561 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1475 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq); 1562 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1476 break; 1563 break;
1564 case WL1271_CIPHER_SUITE_GEM:
1565 key_type = KEY_GEM;
1566 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1567 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1568 break;
1477 default: 1569 default:
1478 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 1570 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
1479 1571
@@ -1557,10 +1649,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1557 if (ret < 0) 1649 if (ret < 0)
1558 goto out; 1650 goto out;
1559 1651
1560 if (wl1271_11a_enabled()) 1652 ret = wl1271_scan(hw->priv, ssid, len, req);
1561 ret = wl1271_scan(hw->priv, ssid, len, req);
1562 else
1563 ret = wl1271_scan(hw->priv, ssid, len, req);
1564 1653
1565 wl1271_ps_elp_sleep(wl); 1654 wl1271_ps_elp_sleep(wl);
1566 1655
@@ -1632,7 +1721,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1632 if (ret < 0) 1721 if (ret < 0)
1633 goto out; 1722 goto out;
1634 1723
1635 if ((changed && BSS_CHANGED_BEACON_INT) && 1724 if ((changed & BSS_CHANGED_BEACON_INT) &&
1636 (wl->bss_type == BSS_TYPE_IBSS)) { 1725 (wl->bss_type == BSS_TYPE_IBSS)) {
1637 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d", 1726 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
1638 bss_conf->beacon_int); 1727 bss_conf->beacon_int);
@@ -1641,7 +1730,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1641 do_join = true; 1730 do_join = true;
1642 } 1731 }
1643 1732
1644 if ((changed && BSS_CHANGED_BEACON) && 1733 if ((changed & BSS_CHANGED_BEACON) &&
1645 (wl->bss_type == BSS_TYPE_IBSS)) { 1734 (wl->bss_type == BSS_TYPE_IBSS)) {
1646 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1735 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1647 1736
@@ -1775,12 +1864,15 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1775 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && 1864 if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
1776 !test_bit(WL1271_FLAG_PSM, &wl->flags)) { 1865 !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
1777 mode = STATION_POWER_SAVE_MODE; 1866 mode = STATION_POWER_SAVE_MODE;
1778 ret = wl1271_ps_set_mode(wl, mode, true); 1867 ret = wl1271_ps_set_mode(wl, mode,
1868 wl->basic_rate,
1869 true);
1779 if (ret < 0) 1870 if (ret < 0)
1780 goto out_sleep; 1871 goto out_sleep;
1781 } 1872 }
1782 } else { 1873 } else {
1783 /* use defaults when not associated */ 1874 /* use defaults when not associated */
1875 clear_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags);
1784 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 1876 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1785 wl->aid = 0; 1877 wl->aid = 0;
1786 1878
@@ -1992,21 +2084,24 @@ static struct ieee80211_rate wl1271_rates[] = {
1992 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 2084 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1993}; 2085};
1994 2086
1995/* can't be const, mac80211 writes to this */ 2087/*
2088 * Can't be const, mac80211 writes to this. The order of the channels here
2089 * is designed to improve scanning.
2090 */
1996static struct ieee80211_channel wl1271_channels[] = { 2091static struct ieee80211_channel wl1271_channels[] = {
1997 { .hw_value = 1, .center_freq = 2412, .max_power = 25 }, 2092 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
1998 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
1999 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
2000 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
2001 { .hw_value = 5, .center_freq = 2432, .max_power = 25 }, 2093 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
2002 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
2003 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
2004 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
2005 { .hw_value = 9, .center_freq = 2452, .max_power = 25 }, 2094 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
2006 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
2007 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2008 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2009 { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, 2095 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2096 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
2097 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
2098 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2099 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
2100 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
2101 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2102 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
2103 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
2104 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
2010}; 2105};
2011 2106
2012/* mapping to indexes for wl1271_rates */ 2107/* mapping to indexes for wl1271_rates */
@@ -2075,49 +2170,52 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
2075 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 2170 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
2076}; 2171};
2077 2172
2078/* 5 GHz band channels for WL1273 */ 2173/*
2174 * 5 GHz band channels for WL1273 - can't be const, mac80211 writes to this.
2175 * The order of the channels here is designed to improve scanning.
2176 */
2079static struct ieee80211_channel wl1271_channels_5ghz[] = { 2177static struct ieee80211_channel wl1271_channels_5ghz[] = {
2080 { .hw_value = 183, .center_freq = 4915}, 2178 { .hw_value = 183, .center_freq = 4915},
2081 { .hw_value = 184, .center_freq = 4920},
2082 { .hw_value = 185, .center_freq = 4925},
2083 { .hw_value = 187, .center_freq = 4935},
2084 { .hw_value = 188, .center_freq = 4940}, 2179 { .hw_value = 188, .center_freq = 4940},
2085 { .hw_value = 189, .center_freq = 4945},
2086 { .hw_value = 192, .center_freq = 4960},
2087 { .hw_value = 196, .center_freq = 4980},
2088 { .hw_value = 7, .center_freq = 5035},
2089 { .hw_value = 8, .center_freq = 5040}, 2180 { .hw_value = 8, .center_freq = 5040},
2090 { .hw_value = 9, .center_freq = 5045},
2091 { .hw_value = 11, .center_freq = 5055},
2092 { .hw_value = 12, .center_freq = 5060},
2093 { .hw_value = 16, .center_freq = 5080},
2094 { .hw_value = 34, .center_freq = 5170}, 2181 { .hw_value = 34, .center_freq = 5170},
2095 { .hw_value = 36, .center_freq = 5180},
2096 { .hw_value = 38, .center_freq = 5190},
2097 { .hw_value = 40, .center_freq = 5200},
2098 { .hw_value = 42, .center_freq = 5210},
2099 { .hw_value = 44, .center_freq = 5220}, 2182 { .hw_value = 44, .center_freq = 5220},
2100 { .hw_value = 46, .center_freq = 5230},
2101 { .hw_value = 48, .center_freq = 5240},
2102 { .hw_value = 52, .center_freq = 5260},
2103 { .hw_value = 56, .center_freq = 5280},
2104 { .hw_value = 60, .center_freq = 5300}, 2183 { .hw_value = 60, .center_freq = 5300},
2105 { .hw_value = 64, .center_freq = 5320},
2106 { .hw_value = 100, .center_freq = 5500},
2107 { .hw_value = 104, .center_freq = 5520},
2108 { .hw_value = 108, .center_freq = 5540},
2109 { .hw_value = 112, .center_freq = 5560}, 2184 { .hw_value = 112, .center_freq = 5560},
2110 { .hw_value = 116, .center_freq = 5580},
2111 { .hw_value = 120, .center_freq = 5600},
2112 { .hw_value = 124, .center_freq = 5620},
2113 { .hw_value = 128, .center_freq = 5640},
2114 { .hw_value = 132, .center_freq = 5660}, 2185 { .hw_value = 132, .center_freq = 5660},
2186 { .hw_value = 157, .center_freq = 5785},
2187 { .hw_value = 184, .center_freq = 4920},
2188 { .hw_value = 189, .center_freq = 4945},
2189 { .hw_value = 9, .center_freq = 5045},
2190 { .hw_value = 36, .center_freq = 5180},
2191 { .hw_value = 46, .center_freq = 5230},
2192 { .hw_value = 64, .center_freq = 5320},
2193 { .hw_value = 116, .center_freq = 5580},
2115 { .hw_value = 136, .center_freq = 5680}, 2194 { .hw_value = 136, .center_freq = 5680},
2195 { .hw_value = 192, .center_freq = 4960},
2196 { .hw_value = 11, .center_freq = 5055},
2197 { .hw_value = 38, .center_freq = 5190},
2198 { .hw_value = 48, .center_freq = 5240},
2199 { .hw_value = 100, .center_freq = 5500},
2200 { .hw_value = 120, .center_freq = 5600},
2116 { .hw_value = 140, .center_freq = 5700}, 2201 { .hw_value = 140, .center_freq = 5700},
2202 { .hw_value = 185, .center_freq = 4925},
2203 { .hw_value = 196, .center_freq = 4980},
2204 { .hw_value = 12, .center_freq = 5060},
2205 { .hw_value = 40, .center_freq = 5200},
2206 { .hw_value = 52, .center_freq = 5260},
2207 { .hw_value = 104, .center_freq = 5520},
2208 { .hw_value = 124, .center_freq = 5620},
2117 { .hw_value = 149, .center_freq = 5745}, 2209 { .hw_value = 149, .center_freq = 5745},
2118 { .hw_value = 153, .center_freq = 5765},
2119 { .hw_value = 157, .center_freq = 5785},
2120 { .hw_value = 161, .center_freq = 5805}, 2210 { .hw_value = 161, .center_freq = 5805},
2211 { .hw_value = 187, .center_freq = 4935},
2212 { .hw_value = 7, .center_freq = 5035},
2213 { .hw_value = 16, .center_freq = 5080},
2214 { .hw_value = 42, .center_freq = 5210},
2215 { .hw_value = 56, .center_freq = 5280},
2216 { .hw_value = 108, .center_freq = 5540},
2217 { .hw_value = 128, .center_freq = 5640},
2218 { .hw_value = 153, .center_freq = 5765},
2121 { .hw_value = 165, .center_freq = 5825}, 2219 { .hw_value = 165, .center_freq = 5825},
2122}; 2220};
2123 2221
@@ -2210,8 +2308,7 @@ static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
2210 struct wl1271 *wl = dev_get_drvdata(dev); 2308 struct wl1271 *wl = dev_get_drvdata(dev);
2211 ssize_t len; 2309 ssize_t len;
2212 2310
2213 /* FIXME: what's the maximum length of buf? page size?*/ 2311 len = PAGE_SIZE;
2214 len = 500;
2215 2312
2216 mutex_lock(&wl->mutex); 2313 mutex_lock(&wl->mutex);
2217 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n", 2314 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
@@ -2272,8 +2369,7 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
2272 struct wl1271 *wl = dev_get_drvdata(dev); 2369 struct wl1271 *wl = dev_get_drvdata(dev);
2273 ssize_t len; 2370 ssize_t len;
2274 2371
2275 /* FIXME: what's the maximum length of buf? page size?*/ 2372 len = PAGE_SIZE;
2276 len = 500;
2277 2373
2278 mutex_lock(&wl->mutex); 2374 mutex_lock(&wl->mutex);
2279 if (wl->hw_pg_ver >= 0) 2375 if (wl->hw_pg_ver >= 0)
@@ -2305,6 +2401,8 @@ int wl1271_register_hw(struct wl1271 *wl)
2305 2401
2306 wl->mac80211_registered = true; 2402 wl->mac80211_registered = true;
2307 2403
2404 register_netdevice_notifier(&wl1271_dev_notifier);
2405
2308 wl1271_notice("loaded"); 2406 wl1271_notice("loaded");
2309 2407
2310 return 0; 2408 return 0;
@@ -2313,6 +2411,7 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
2313 2411
2314void wl1271_unregister_hw(struct wl1271 *wl) 2412void wl1271_unregister_hw(struct wl1271 *wl)
2315{ 2413{
2414 unregister_netdevice_notifier(&wl1271_dev_notifier);
2316 ieee80211_unregister_hw(wl->hw); 2415 ieee80211_unregister_hw(wl->hw);
2317 wl->mac80211_registered = false; 2416 wl->mac80211_registered = false;
2318 2417
@@ -2321,6 +2420,14 @@ EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
2321 2420
2322int wl1271_init_ieee80211(struct wl1271 *wl) 2421int wl1271_init_ieee80211(struct wl1271 *wl)
2323{ 2422{
2423 static const u32 cipher_suites[] = {
2424 WLAN_CIPHER_SUITE_WEP40,
2425 WLAN_CIPHER_SUITE_WEP104,
2426 WLAN_CIPHER_SUITE_TKIP,
2427 WLAN_CIPHER_SUITE_CCMP,
2428 WL1271_CIPHER_SUITE_GEM,
2429 };
2430
2324 /* The tx descriptor buffer and the TKIP space. */ 2431 /* The tx descriptor buffer and the TKIP space. */
2325 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE + 2432 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
2326 sizeof(struct wl1271_tx_hw_descr); 2433 sizeof(struct wl1271_tx_hw_descr);
@@ -2338,13 +2445,14 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2338 IEEE80211_HW_CONNECTION_MONITOR | 2445 IEEE80211_HW_CONNECTION_MONITOR |
2339 IEEE80211_HW_SUPPORTS_CQM_RSSI; 2446 IEEE80211_HW_SUPPORTS_CQM_RSSI;
2340 2447
2448 wl->hw->wiphy->cipher_suites = cipher_suites;
2449 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
2450
2341 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2451 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2342 BIT(NL80211_IFTYPE_ADHOC); 2452 BIT(NL80211_IFTYPE_ADHOC);
2343 wl->hw->wiphy->max_scan_ssids = 1; 2453 wl->hw->wiphy->max_scan_ssids = 1;
2344 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 2454 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
2345 2455 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
2346 if (wl1271_11a_enabled())
2347 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
2348 2456
2349 wl->hw->queues = 4; 2457 wl->hw->queues = 4;
2350 wl->hw->max_rates = 1; 2458 wl->hw->max_rates = 1;
@@ -2363,6 +2471,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2363 struct platform_device *plat_dev = NULL; 2471 struct platform_device *plat_dev = NULL;
2364 struct wl1271 *wl; 2472 struct wl1271 *wl;
2365 int i, ret; 2473 int i, ret;
2474 unsigned int order;
2366 2475
2367 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 2476 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
2368 if (!hw) { 2477 if (!hw) {
@@ -2390,6 +2499,10 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2390 2499
2391 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 2500 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
2392 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); 2501 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
2502 INIT_WORK(&wl->irq_work, wl1271_irq_work);
2503 INIT_WORK(&wl->tx_work, wl1271_tx_work);
2504 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
2505 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
2393 wl->channel = WL1271_DEFAULT_CHANNEL; 2506 wl->channel = WL1271_DEFAULT_CHANNEL;
2394 wl->beacon_int = WL1271_DEFAULT_BEACON_INT; 2507 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
2395 wl->default_key = 0; 2508 wl->default_key = 0;
@@ -2421,11 +2534,18 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2421 2534
2422 wl1271_debugfs_init(wl); 2535 wl1271_debugfs_init(wl);
2423 2536
2537 order = get_order(WL1271_AGGR_BUFFER_SIZE);
2538 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
2539 if (!wl->aggr_buf) {
2540 ret = -ENOMEM;
2541 goto err_hw;
2542 }
2543
2424 /* Register platform device */ 2544 /* Register platform device */
2425 ret = platform_device_register(wl->plat_dev); 2545 ret = platform_device_register(wl->plat_dev);
2426 if (ret) { 2546 if (ret) {
2427 wl1271_error("couldn't register platform device"); 2547 wl1271_error("couldn't register platform device");
2428 goto err_hw; 2548 goto err_aggr;
2429 } 2549 }
2430 dev_set_drvdata(&wl->plat_dev->dev, wl); 2550 dev_set_drvdata(&wl->plat_dev->dev, wl);
2431 2551
@@ -2451,6 +2571,9 @@ err_bt_coex_state:
2451err_platform: 2571err_platform:
2452 platform_device_unregister(wl->plat_dev); 2572 platform_device_unregister(wl->plat_dev);
2453 2573
2574err_aggr:
2575 free_pages((unsigned long)wl->aggr_buf, order);
2576
2454err_hw: 2577err_hw:
2455 wl1271_debugfs_exit(wl); 2578 wl1271_debugfs_exit(wl);
2456 kfree(plat_dev); 2579 kfree(plat_dev);
@@ -2467,6 +2590,8 @@ EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
2467int wl1271_free_hw(struct wl1271 *wl) 2590int wl1271_free_hw(struct wl1271 *wl)
2468{ 2591{
2469 platform_device_unregister(wl->plat_dev); 2592 platform_device_unregister(wl->plat_dev);
2593 free_pages((unsigned long)wl->aggr_buf,
2594 get_order(WL1271_AGGR_BUFFER_SIZE));
2470 kfree(wl->plat_dev); 2595 kfree(wl->plat_dev);
2471 2596
2472 wl1271_debugfs_exit(wl); 2597 wl1271_debugfs_exit(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index a5e60e0403e5..e3c332e2f97c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -39,6 +39,9 @@ void wl1271_elp_work(struct work_struct *work)
39 39
40 mutex_lock(&wl->mutex); 40 mutex_lock(&wl->mutex);
41 41
42 if (unlikely(wl->state == WL1271_STATE_OFF))
43 goto out;
44
42 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || 45 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
43 (!test_bit(WL1271_FLAG_PSM, &wl->flags) && 46 (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
44 !test_bit(WL1271_FLAG_IDLE, &wl->flags))) 47 !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
@@ -61,7 +64,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
61 test_bit(WL1271_FLAG_IDLE, &wl->flags)) { 64 test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
62 cancel_delayed_work(&wl->elp_work); 65 cancel_delayed_work(&wl->elp_work);
63 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 66 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
64 msecs_to_jiffies(ELP_ENTRY_DELAY)); 67 msecs_to_jiffies(ELP_ENTRY_DELAY));
65 } 68 }
66} 69}
67 70
@@ -96,6 +99,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
96 &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); 99 &compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
97 if (ret == 0) { 100 if (ret == 0) {
98 wl1271_error("ELP wakeup timeout!"); 101 wl1271_error("ELP wakeup timeout!");
102 ieee80211_queue_work(wl->hw, &wl->recovery_work);
99 ret = -ETIMEDOUT; 103 ret = -ETIMEDOUT;
100 goto err; 104 goto err;
101 } else if (ret < 0) { 105 } else if (ret < 0) {
@@ -121,7 +125,7 @@ out:
121} 125}
122 126
123int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 127int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
124 bool send) 128 u32 rates, bool send)
125{ 129{
126 int ret; 130 int ret;
127 131
@@ -129,7 +133,14 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
129 case STATION_POWER_SAVE_MODE: 133 case STATION_POWER_SAVE_MODE:
130 wl1271_debug(DEBUG_PSM, "entering psm"); 134 wl1271_debug(DEBUG_PSM, "entering psm");
131 135
132 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, send); 136 ret = wl1271_acx_wake_up_conditions(wl);
137 if (ret < 0) {
138 wl1271_error("couldn't set wake up conditions");
139 return ret;
140 }
141
142 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE,
143 rates, send);
133 if (ret < 0) 144 if (ret < 0)
134 return ret; 145 return ret;
135 146
@@ -152,7 +163,8 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
152 if (ret < 0) 163 if (ret < 0)
153 return ret; 164 return ret;
154 165
155 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, send); 166 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE,
167 rates, send);
156 if (ret < 0) 168 if (ret < 0)
157 return ret; 169 return ret;
158 170
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index 940276f517a4..6ba7b032736f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -28,7 +28,7 @@
28#include "wl1271_acx.h" 28#include "wl1271_acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
31 bool send); 31 u32 rates, bool send);
32void wl1271_ps_elp_sleep(struct wl1271 *wl); 32void wl1271_ps_elp_sleep(struct wl1271 *wl);
33int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 33int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
34void wl1271_elp_work(struct work_struct *work); 34void wl1271_elp_work(struct work_struct *work);
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index 019aa79cd9df..bea133b6e489 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -74,9 +74,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
74 } 74 }
75} 75}
76 76
77static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) 77static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
78{ 78{
79 struct ieee80211_rx_status rx_status;
80 struct wl1271_rx_descriptor *desc; 79 struct wl1271_rx_descriptor *desc;
81 struct sk_buff *skb; 80 struct sk_buff *skb;
82 u16 *fc; 81 u16 *fc;
@@ -88,16 +87,16 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
88 * workaround this by not retrieving them at all. 87 * workaround this by not retrieving them at all.
89 */ 88 */
90 if (unlikely(wl->state == WL1271_STATE_PLT)) 89 if (unlikely(wl->state == WL1271_STATE_PLT))
91 return; 90 return -EINVAL;
92 91
93 skb = __dev_alloc_skb(length, GFP_KERNEL); 92 skb = __dev_alloc_skb(length, GFP_KERNEL);
94 if (!skb) { 93 if (!skb) {
95 wl1271_error("Couldn't allocate RX frame"); 94 wl1271_error("Couldn't allocate RX frame");
96 return; 95 return -ENOMEM;
97 } 96 }
98 97
99 buf = skb_put(skb, length); 98 buf = skb_put(skb, length);
100 wl1271_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); 99 memcpy(buf, data, length);
101 100
102 /* the data read starts with the descriptor */ 101 /* the data read starts with the descriptor */
103 desc = (struct wl1271_rx_descriptor *) buf; 102 desc = (struct wl1271_rx_descriptor *) buf;
@@ -109,15 +108,16 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
109 if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) 108 if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
110 beacon = 1; 109 beacon = 1;
111 110
112 wl1271_rx_status(wl, desc, &rx_status, beacon); 111 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
113 112
114 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len, 113 wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
115 beacon ? "beacon" : ""); 114 beacon ? "beacon" : "");
116 115
117 skb_trim(skb, skb->len - desc->pad_len); 116 skb_trim(skb, skb->len - desc->pad_len);
118 117
119 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
120 ieee80211_rx_ni(wl->hw, skb); 118 ieee80211_rx_ni(wl->hw, skb);
119
120 return 0;
121} 121}
122 122
123void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) 123void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
@@ -126,31 +126,60 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
126 u32 buf_size; 126 u32 buf_size;
127 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 127 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
128 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 128 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
129 u32 rx_counter;
129 u32 mem_block; 130 u32 mem_block;
131 u32 pkt_length;
132 u32 pkt_offset;
130 133
131 while (drv_rx_counter != fw_rx_counter) { 134 while (drv_rx_counter != fw_rx_counter) {
132 mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter); 135 buf_size = 0;
133 buf_size = wl1271_rx_get_buf_size(status, drv_rx_counter); 136 rx_counter = drv_rx_counter;
137 while (rx_counter != fw_rx_counter) {
138 pkt_length = wl1271_rx_get_buf_size(status, rx_counter);
139 if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
140 break;
141 buf_size += pkt_length;
142 rx_counter++;
143 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
144 }
134 145
135 if (buf_size == 0) { 146 if (buf_size == 0) {
136 wl1271_warning("received empty data"); 147 wl1271_warning("received empty data");
137 break; 148 break;
138 } 149 }
139 150
151 /*
152 * Choose the block we want to read
153 * For aggregated packets, only the first memory block should
154 * be retrieved. The FW takes care of the rest.
155 */
156 mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter);
140 wl->rx_mem_pool_addr.addr = (mem_block << 8) + 157 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
141 le32_to_cpu(wl_mem_map->packet_memory_pool_start); 158 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
142 wl->rx_mem_pool_addr.addr_extra = 159 wl->rx_mem_pool_addr.addr_extra =
143 wl->rx_mem_pool_addr.addr + 4; 160 wl->rx_mem_pool_addr.addr + 4;
144
145 /* Choose the block we want to read */
146 wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr, 161 wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
147 sizeof(wl->rx_mem_pool_addr), false); 162 sizeof(wl->rx_mem_pool_addr), false);
148 163
149 wl1271_rx_handle_data(wl, buf_size); 164 /* Read all available packets at once */
150 165 wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
151 wl->rx_counter++; 166 buf_size, true);
152 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 167
168 /* Split data into separate packets */
169 pkt_offset = 0;
170 while (pkt_offset < buf_size) {
171 pkt_length = wl1271_rx_get_buf_size(status,
172 drv_rx_counter);
173 if (wl1271_rx_handle_data(wl,
174 wl->aggr_buf + pkt_offset,
175 pkt_length) < 0)
176 break;
177 wl->rx_counter++;
178 drv_rx_counter++;
179 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
180 pkt_offset += pkt_length;
181 }
153 } 182 }
154 183 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
155 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 184 cpu_to_le32(wl->rx_counter));
156} 185}
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/wl1271_scan.c
index e4950c8e396e..909bb47995b6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.c
@@ -28,11 +28,43 @@
28#include "wl1271_scan.h" 28#include "wl1271_scan.h"
29#include "wl1271_acx.h" 29#include "wl1271_acx.h"
30 30
31void wl1271_scan_complete_work(struct work_struct *work)
32{
33 struct delayed_work *dwork;
34 struct wl1271 *wl;
35
36 dwork = container_of(work, struct delayed_work, work);
37 wl = container_of(dwork, struct wl1271, scan_complete_work);
38
39 wl1271_debug(DEBUG_SCAN, "Scanning complete");
40
41 mutex_lock(&wl->mutex);
42
43 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
44 mutex_unlock(&wl->mutex);
45 return;
46 }
47
48 wl->scan.state = WL1271_SCAN_STATE_IDLE;
49 kfree(wl->scan.scanned_ch);
50 wl->scan.scanned_ch = NULL;
51 mutex_unlock(&wl->mutex);
52
53 ieee80211_scan_completed(wl->hw, false);
54
55 if (wl->scan.failed) {
56 wl1271_info("Scan completed due to error.");
57 ieee80211_queue_work(wl->hw, &wl->recovery_work);
58 }
59}
60
61
31static int wl1271_get_scan_channels(struct wl1271 *wl, 62static int wl1271_get_scan_channels(struct wl1271 *wl,
32 struct cfg80211_scan_request *req, 63 struct cfg80211_scan_request *req,
33 struct basic_scan_channel_params *channels, 64 struct basic_scan_channel_params *channels,
34 enum ieee80211_band band, bool passive) 65 enum ieee80211_band band, bool passive)
35{ 66{
67 struct conf_scan_settings *c = &wl->conf.scan;
36 int i, j; 68 int i, j;
37 u32 flags; 69 u32 flags;
38 70
@@ -60,10 +92,17 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
60 wl1271_debug(DEBUG_SCAN, "beacon_found %d", 92 wl1271_debug(DEBUG_SCAN, "beacon_found %d",
61 req->channels[i]->beacon_found); 93 req->channels[i]->beacon_found);
62 94
63 channels[j].min_duration = 95 if (!passive) {
64 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); 96 channels[j].min_duration =
65 channels[j].max_duration = 97 cpu_to_le32(c->min_dwell_time_active);
66 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); 98 channels[j].max_duration =
99 cpu_to_le32(c->max_dwell_time_active);
100 } else {
101 channels[j].min_duration =
102 cpu_to_le32(c->min_dwell_time_passive);
103 channels[j].max_duration =
104 cpu_to_le32(c->max_dwell_time_passive);
105 }
67 channels[j].early_termination = 0; 106 channels[j].early_termination = 0;
68 channels[j].tx_power_att = req->channels[i]->max_power; 107 channels[j].tx_power_att = req->channels[i]->max_power;
69 channels[j].channel = req->channels[i]->hw_value; 108 channels[j].channel = req->channels[i]->hw_value;
@@ -100,8 +139,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
100 139
101 /* We always use high priority scans */ 140 /* We always use high priority scans */
102 scan_options = WL1271_SCAN_OPT_PRIORITY_HIGH; 141 scan_options = WL1271_SCAN_OPT_PRIORITY_HIGH;
103 if(passive) 142
143 /* No SSIDs means that we have a forced passive scan */
144 if (passive || wl->scan.req->n_ssids == 0)
104 scan_options |= WL1271_SCAN_OPT_PASSIVE; 145 scan_options |= WL1271_SCAN_OPT_PASSIVE;
146
105 cmd->params.scan_options = cpu_to_le16(scan_options); 147 cmd->params.scan_options = cpu_to_le16(scan_options);
106 148
107 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, 149 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -117,7 +159,7 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
117 cmd->params.rx_filter_options = 159 cmd->params.rx_filter_options =
118 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN); 160 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
119 161
120 cmd->params.n_probe_reqs = WL1271_SCAN_PROBE_REQS; 162 cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
121 cmd->params.tx_rate = cpu_to_le32(basic_rate); 163 cmd->params.tx_rate = cpu_to_le32(basic_rate);
122 cmd->params.tid_trigger = 0; 164 cmd->params.tid_trigger = 0;
123 cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 165 cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -165,7 +207,7 @@ out:
165 207
166void wl1271_scan_stm(struct wl1271 *wl) 208void wl1271_scan_stm(struct wl1271 *wl)
167{ 209{
168 int ret; 210 int ret = 0;
169 211
170 switch (wl->scan.state) { 212 switch (wl->scan.state) {
171 case WL1271_SCAN_STATE_IDLE: 213 case WL1271_SCAN_STATE_IDLE:
@@ -185,7 +227,7 @@ void wl1271_scan_stm(struct wl1271 *wl)
185 ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, true, 227 ret = wl1271_scan_send(wl, IEEE80211_BAND_2GHZ, true,
186 wl->conf.tx.basic_rate); 228 wl->conf.tx.basic_rate);
187 if (ret == WL1271_NOTHING_TO_SCAN) { 229 if (ret == WL1271_NOTHING_TO_SCAN) {
188 if (wl1271_11a_enabled()) 230 if (wl->enable_11a)
189 wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE; 231 wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
190 else 232 else
191 wl->scan.state = WL1271_SCAN_STATE_DONE; 233 wl->scan.state = WL1271_SCAN_STATE_DONE;
@@ -215,18 +257,22 @@ void wl1271_scan_stm(struct wl1271 *wl)
215 break; 257 break;
216 258
217 case WL1271_SCAN_STATE_DONE: 259 case WL1271_SCAN_STATE_DONE:
218 ieee80211_scan_completed(wl->hw, false); 260 wl->scan.failed = false;
219 261 cancel_delayed_work(&wl->scan_complete_work);
220 kfree(wl->scan.scanned_ch); 262 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
221 wl->scan.scanned_ch = NULL; 263 msecs_to_jiffies(0));
222
223 wl->scan.state = WL1271_SCAN_STATE_IDLE;
224 break; 264 break;
225 265
226 default: 266 default:
227 wl1271_error("invalid scan state"); 267 wl1271_error("invalid scan state");
228 break; 268 break;
229 } 269 }
270
271 if (ret < 0) {
272 cancel_delayed_work(&wl->scan_complete_work);
273 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
274 msecs_to_jiffies(0));
275 }
230} 276}
231 277
232int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, 278int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
@@ -249,6 +295,11 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
249 wl->scan.scanned_ch = kcalloc(req->n_channels, 295 wl->scan.scanned_ch = kcalloc(req->n_channels,
250 sizeof(*wl->scan.scanned_ch), 296 sizeof(*wl->scan.scanned_ch),
251 GFP_KERNEL); 297 GFP_KERNEL);
298 /* we assume failure so that timeout scenarios are handled correctly */
299 wl->scan.failed = true;
300 ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
301 msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
302
252 wl1271_scan_stm(wl); 303 wl1271_scan_stm(wl);
253 304
254 return 0; 305 return 0;
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.h b/drivers/net/wireless/wl12xx/wl1271_scan.h
index f1815700f5f9..6d57127b5e6b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.h
+++ b/drivers/net/wireless/wl12xx/wl1271_scan.h
@@ -32,6 +32,7 @@ int wl1271_scan_build_probe_req(struct wl1271 *wl,
32 const u8 *ssid, size_t ssid_len, 32 const u8 *ssid, size_t ssid_len,
33 const u8 *ie, size_t ie_len, u8 band); 33 const u8 *ie, size_t ie_len, u8 band);
34void wl1271_scan_stm(struct wl1271 *wl); 34void wl1271_scan_stm(struct wl1271 *wl);
35void wl1271_scan_complete_work(struct work_struct *work);
35 36
36#define WL1271_SCAN_MAX_CHANNELS 24 37#define WL1271_SCAN_MAX_CHANNELS 24
37#define WL1271_SCAN_DEFAULT_TAG 1 38#define WL1271_SCAN_DEFAULT_TAG 1
@@ -39,11 +40,10 @@ void wl1271_scan_stm(struct wl1271 *wl);
39#define WL1271_SCAN_OPT_ACTIVE 0 40#define WL1271_SCAN_OPT_ACTIVE 0
40#define WL1271_SCAN_OPT_PASSIVE 1 41#define WL1271_SCAN_OPT_PASSIVE 1
41#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 42#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
42#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */
43#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */
44#define WL1271_SCAN_BAND_2_4_GHZ 0 43#define WL1271_SCAN_BAND_2_4_GHZ 0
45#define WL1271_SCAN_BAND_5_GHZ 1 44#define WL1271_SCAN_BAND_5_GHZ 1
46#define WL1271_SCAN_PROBE_REQS 3 45
46#define WL1271_SCAN_TIMEOUT 10000 /* msec */
47 47
48enum { 48enum {
49 WL1271_SCAN_STATE_IDLE, 49 WL1271_SCAN_STATE_IDLE,
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
index 7059b5cccf0f..784ef3432641 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -29,14 +29,13 @@
29#include <linux/mmc/sdio_ids.h> 29#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h> 30#include <linux/mmc/card.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/wl12xx.h>
33#include <linux/pm_runtime.h>
32 34
33#include "wl1271.h" 35#include "wl1271.h"
34#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
35#include "wl1271_io.h" 37#include "wl1271_io.h"
36 38
37
38#define RX71_WL1271_IRQ_GPIO 42
39
40#ifndef SDIO_VENDOR_ID_TI 39#ifndef SDIO_VENDOR_ID_TI
41#define SDIO_VENDOR_ID_TI 0x0097 40#define SDIO_VENDOR_ID_TI 0x0097
42#endif 41#endif
@@ -107,6 +106,8 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
107 int ret; 106 int ret;
108 struct sdio_func *func = wl_to_func(wl); 107 struct sdio_func *func = wl_to_func(wl);
109 108
109 sdio_claim_host(func);
110
110 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 111 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
111 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 112 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
112 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", 113 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@@ -122,9 +123,10 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
122 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); 123 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
123 } 124 }
124 125
126 sdio_release_host(func);
127
125 if (ret) 128 if (ret)
126 wl1271_error("sdio read failed (%d)", ret); 129 wl1271_error("sdio read failed (%d)", ret);
127
128} 130}
129 131
130static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, 132static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
@@ -133,6 +135,8 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
133 int ret; 135 int ret;
134 struct sdio_func *func = wl_to_func(wl); 136 struct sdio_func *func = wl_to_func(wl);
135 137
138 sdio_claim_host(func);
139
136 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 140 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
137 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 141 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
138 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", 142 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@@ -147,26 +151,49 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
147 else 151 else
148 ret = sdio_memcpy_toio(func, addr, buf, len); 152 ret = sdio_memcpy_toio(func, addr, buf, len);
149 } 153 }
154
155 sdio_release_host(func);
156
150 if (ret) 157 if (ret)
151 wl1271_error("sdio write failed (%d)", ret); 158 wl1271_error("sdio write failed (%d)", ret);
159}
152 160
161static int wl1271_sdio_power_on(struct wl1271 *wl)
162{
163 struct sdio_func *func = wl_to_func(wl);
164 int ret;
165
166 /* Power up the card */
167 ret = pm_runtime_get_sync(&func->dev);
168 if (ret < 0)
169 goto out;
170
171 sdio_claim_host(func);
172 sdio_enable_func(func);
173 sdio_release_host(func);
174
175out:
176 return ret;
153} 177}
154 178
155static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable) 179static int wl1271_sdio_power_off(struct wl1271 *wl)
156{ 180{
157 struct sdio_func *func = wl_to_func(wl); 181 struct sdio_func *func = wl_to_func(wl);
158 182
159 /* Let the SDIO stack handle wlan_enable control, so we 183 sdio_claim_host(func);
160 * keep host claimed while wlan is in use to keep wl1271 184 sdio_disable_func(func);
161 * alive. 185 sdio_release_host(func);
162 */ 186
163 if (enable) { 187 /* Power down the card */
164 sdio_claim_host(func); 188 return pm_runtime_put_sync(&func->dev);
165 sdio_enable_func(func); 189}
166 } else { 190
167 sdio_disable_func(func); 191static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
168 sdio_release_host(func); 192{
169 } 193 if (enable)
194 return wl1271_sdio_power_on(wl);
195 else
196 return wl1271_sdio_power_off(wl);
170} 197}
171 198
172static struct wl1271_if_operations sdio_ops = { 199static struct wl1271_if_operations sdio_ops = {
@@ -184,6 +211,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
184 const struct sdio_device_id *id) 211 const struct sdio_device_id *id)
185{ 212{
186 struct ieee80211_hw *hw; 213 struct ieee80211_hw *hw;
214 const struct wl12xx_platform_data *wlan_data;
187 struct wl1271 *wl; 215 struct wl1271 *wl;
188 int ret; 216 int ret;
189 217
@@ -203,13 +231,16 @@ static int __devinit wl1271_probe(struct sdio_func *func,
203 /* Grab access to FN0 for ELP reg. */ 231 /* Grab access to FN0 for ELP reg. */
204 func->card->quirks |= MMC_QUIRK_LENIENT_FN0; 232 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
205 233
206 wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO); 234 wlan_data = wl12xx_get_platform_data();
207 if (wl->irq < 0) { 235 if (IS_ERR(wlan_data)) {
208 ret = wl->irq; 236 ret = PTR_ERR(wlan_data);
209 wl1271_error("could not get irq!"); 237 wl1271_error("missing wlan platform data: %d", ret);
210 goto out_free; 238 goto out_free;
211 } 239 }
212 240
241 wl->irq = wlan_data->irq;
242 wl->ref_clock = wlan_data->board_ref_clock;
243
213 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); 244 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
214 if (ret < 0) { 245 if (ret < 0) {
215 wl1271_error("request_irq() failed: %d", ret); 246 wl1271_error("request_irq() failed: %d", ret);
@@ -230,6 +261,9 @@ static int __devinit wl1271_probe(struct sdio_func *func,
230 261
231 sdio_set_drvdata(func, wl); 262 sdio_set_drvdata(func, wl);
232 263
264 /* Tell PM core that we don't need the card to be powered now */
265 pm_runtime_put_noidle(&func->dev);
266
233 wl1271_notice("initialized"); 267 wl1271_notice("initialized");
234 268
235 return 0; 269 return 0;
@@ -248,17 +282,39 @@ static void __devexit wl1271_remove(struct sdio_func *func)
248{ 282{
249 struct wl1271 *wl = sdio_get_drvdata(func); 283 struct wl1271 *wl = sdio_get_drvdata(func);
250 284
251 free_irq(wl->irq, wl); 285 /* Undo decrement done above in wl1271_probe */
286 pm_runtime_get_noresume(&func->dev);
252 287
253 wl1271_unregister_hw(wl); 288 wl1271_unregister_hw(wl);
289 free_irq(wl->irq, wl);
254 wl1271_free_hw(wl); 290 wl1271_free_hw(wl);
255} 291}
256 292
293static int wl1271_suspend(struct device *dev)
294{
295 /* Tell MMC/SDIO core it's OK to power down the card
296 * (if it isn't already), but not to remove it completely */
297 return 0;
298}
299
300static int wl1271_resume(struct device *dev)
301{
302 return 0;
303}
304
305static const struct dev_pm_ops wl1271_sdio_pm_ops = {
306 .suspend = wl1271_suspend,
307 .resume = wl1271_resume,
308};
309
257static struct sdio_driver wl1271_sdio_driver = { 310static struct sdio_driver wl1271_sdio_driver = {
258 .name = "wl1271_sdio", 311 .name = "wl1271_sdio",
259 .id_table = wl1271_devices, 312 .id_table = wl1271_devices,
260 .probe = wl1271_probe, 313 .probe = wl1271_probe,
261 .remove = __devexit_p(wl1271_remove), 314 .remove = __devexit_p(wl1271_remove),
315 .drv = {
316 .pm = &wl1271_sdio_pm_ops,
317 },
262}; 318};
263 319
264static int __init wl1271_init(void) 320static int __init wl1271_init(void)
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4cb99c541e2a..ef801680773f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -25,7 +25,7 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl1271.h" 31#include "wl1271.h"
@@ -63,6 +63,11 @@
63 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) 63 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
64#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 64#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
65 65
66/* HW limitation: maximum possible chunk size is 4095 bytes */
67#define WSPI_MAX_CHUNK_SIZE 4092
68
69#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
70
66static inline struct spi_device *wl_to_spi(struct wl1271 *wl) 71static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
67{ 72{
68 return wl->if_priv; 73 return wl->if_priv;
@@ -202,90 +207,117 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
202static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, 207static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
203 size_t len, bool fixed) 208 size_t len, bool fixed)
204{ 209{
205 struct spi_transfer t[3]; 210 struct spi_transfer t[2];
206 struct spi_message m; 211 struct spi_message m;
207 u32 *busy_buf; 212 u32 *busy_buf;
208 u32 *cmd; 213 u32 *cmd;
214 u32 chunk_len;
209 215
210 cmd = &wl->buffer_cmd; 216 while (len > 0) {
211 busy_buf = wl->buffer_busyword; 217 chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
212 218
213 *cmd = 0; 219 cmd = &wl->buffer_cmd;
214 *cmd |= WSPI_CMD_READ; 220 busy_buf = wl->buffer_busyword;
215 *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
216 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
217 221
218 if (fixed) 222 *cmd = 0;
219 *cmd |= WSPI_CMD_FIXED; 223 *cmd |= WSPI_CMD_READ;
224 *cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
225 WSPI_CMD_BYTE_LENGTH;
226 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
220 227
221 spi_message_init(&m); 228 if (fixed)
222 memset(t, 0, sizeof(t)); 229 *cmd |= WSPI_CMD_FIXED;
223 230
224 t[0].tx_buf = cmd; 231 spi_message_init(&m);
225 t[0].len = 4; 232 memset(t, 0, sizeof(t));
226 t[0].cs_change = true;
227 spi_message_add_tail(&t[0], &m);
228 233
229 /* Busy and non busy words read */ 234 t[0].tx_buf = cmd;
230 t[1].rx_buf = busy_buf; 235 t[0].len = 4;
231 t[1].len = WL1271_BUSY_WORD_LEN; 236 t[0].cs_change = true;
232 t[1].cs_change = true; 237 spi_message_add_tail(&t[0], &m);
233 spi_message_add_tail(&t[1], &m);
234 238
235 spi_sync(wl_to_spi(wl), &m); 239 /* Busy and non busy words read */
240 t[1].rx_buf = busy_buf;
241 t[1].len = WL1271_BUSY_WORD_LEN;
242 t[1].cs_change = true;
243 spi_message_add_tail(&t[1], &m);
236 244
237 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) && 245 spi_sync(wl_to_spi(wl), &m);
238 wl1271_spi_read_busy(wl)) {
239 memset(buf, 0, len);
240 return;
241 }
242 246
243 spi_message_init(&m); 247 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
244 memset(t, 0, sizeof(t)); 248 wl1271_spi_read_busy(wl)) {
249 memset(buf, 0, chunk_len);
250 return;
251 }
245 252
246 t[0].rx_buf = buf; 253 spi_message_init(&m);
247 t[0].len = len; 254 memset(t, 0, sizeof(t));
248 t[0].cs_change = true;
249 spi_message_add_tail(&t[0], &m);
250 255
251 spi_sync(wl_to_spi(wl), &m); 256 t[0].rx_buf = buf;
257 t[0].len = chunk_len;
258 t[0].cs_change = true;
259 spi_message_add_tail(&t[0], &m);
252 260
253 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); 261 spi_sync(wl_to_spi(wl), &m);
254 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); 262
263 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
264 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
265
266 if (!fixed)
267 addr += chunk_len;
268 buf += chunk_len;
269 len -= chunk_len;
270 }
255} 271}
256 272
257static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, 273static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
258 size_t len, bool fixed) 274 size_t len, bool fixed)
259{ 275{
260 struct spi_transfer t[2]; 276 struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
261 struct spi_message m; 277 struct spi_message m;
278 u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
262 u32 *cmd; 279 u32 *cmd;
280 u32 chunk_len;
281 int i;
263 282
264 cmd = &wl->buffer_cmd; 283 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
265
266 *cmd = 0;
267 *cmd |= WSPI_CMD_WRITE;
268 *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
269 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
270
271 if (fixed)
272 *cmd |= WSPI_CMD_FIXED;
273 284
274 spi_message_init(&m); 285 spi_message_init(&m);
275 memset(t, 0, sizeof(t)); 286 memset(t, 0, sizeof(t));
276 287
277 t[0].tx_buf = cmd; 288 cmd = &commands[0];
278 t[0].len = sizeof(*cmd); 289 i = 0;
279 spi_message_add_tail(&t[0], &m); 290 while (len > 0) {
291 chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
280 292
281 t[1].tx_buf = buf; 293 *cmd = 0;
282 t[1].len = len; 294 *cmd |= WSPI_CMD_WRITE;
283 spi_message_add_tail(&t[1], &m); 295 *cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
296 WSPI_CMD_BYTE_LENGTH;
297 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
284 298
285 spi_sync(wl_to_spi(wl), &m); 299 if (fixed)
300 *cmd |= WSPI_CMD_FIXED;
301
302 t[i].tx_buf = cmd;
303 t[i].len = sizeof(*cmd);
304 spi_message_add_tail(&t[i++], &m);
305
306 t[i].tx_buf = buf;
307 t[i].len = chunk_len;
308 spi_message_add_tail(&t[i++], &m);
286 309
287 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); 310 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
288 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 311 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
312
313 if (!fixed)
314 addr += chunk_len;
315 buf += chunk_len;
316 len -= chunk_len;
317 cmd++;
318 }
319
320 spi_sync(wl_to_spi(wl), &m);
289} 321}
290 322
291static irqreturn_t wl1271_irq(int irq, void *cookie) 323static irqreturn_t wl1271_irq(int irq, void *cookie)
@@ -312,10 +344,12 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
312 return IRQ_HANDLED; 344 return IRQ_HANDLED;
313} 345}
314 346
315static void wl1271_spi_set_power(struct wl1271 *wl, bool enable) 347static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
316{ 348{
317 if (wl->set_power) 349 if (wl->set_power)
318 wl->set_power(enable); 350 wl->set_power(enable);
351
352 return 0;
319} 353}
320 354
321static struct wl1271_if_operations spi_ops = { 355static struct wl1271_if_operations spi_ops = {
@@ -370,6 +404,8 @@ static int __devinit wl1271_probe(struct spi_device *spi)
370 goto out_free; 404 goto out_free;
371 } 405 }
372 406
407 wl->ref_clock = pdata->board_ref_clock;
408
373 wl->irq = spi->irq; 409 wl->irq = spi->irq;
374 if (wl->irq < 0) { 410 if (wl->irq < 0) {
375 wl1271_error("irq missing in platform data"); 411 wl1271_error("irq missing in platform data");
@@ -412,9 +448,8 @@ static int __devexit wl1271_remove(struct spi_device *spi)
412{ 448{
413 struct wl1271 *wl = dev_get_drvdata(&spi->dev); 449 struct wl1271 *wl = dev_get_drvdata(&spi->dev);
414 450
415 free_irq(wl->irq, wl);
416
417 wl1271_unregister_hw(wl); 451 wl1271_unregister_hw(wl);
452 free_irq(wl->irq, wl);
418 wl1271_free_hw(wl); 453 wl1271_free_hw(wl);
419 454
420 return 0; 455 return 0;
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 6e0952f79e9a..a3aa84386c88 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -199,19 +199,6 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
199 buf = nla_data(tb[WL1271_TM_ATTR_DATA]); 199 buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
200 len = nla_len(tb[WL1271_TM_ATTR_DATA]); 200 len = nla_len(tb[WL1271_TM_ATTR_DATA]);
201 201
202 /*
203 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz band
204 * configurations) can be removed when those NVS files stop floating
205 * around.
206 */
207 if (len != sizeof(struct wl1271_nvs_file) &&
208 (len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
209 wl1271_11a_enabled())) {
210 wl1271_error("nvs size is not as expected: %zu != %zu",
211 len, sizeof(struct wl1271_nvs_file));
212 return -EMSGSIZE;
213 }
214
215 mutex_lock(&wl->mutex); 202 mutex_lock(&wl->mutex);
216 203
217 kfree(wl->nvs); 204 kfree(wl->nvs);
@@ -224,6 +211,7 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
224 } 211 }
225 212
226 memcpy(wl->nvs, buf, len); 213 memcpy(wl->nvs, buf, len);
214 wl->nvs_len = len;
227 215
228 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs"); 216 wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
229 217
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index dc0b46c93c4b..e3dc13c4d01a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -43,13 +43,17 @@ static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
43 return -EBUSY; 43 return -EBUSY;
44} 44}
45 45
46static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra) 46static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
47 u32 buf_offset)
47{ 48{
48 struct wl1271_tx_hw_descr *desc; 49 struct wl1271_tx_hw_descr *desc;
49 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 50 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
50 u32 total_blocks; 51 u32 total_blocks;
51 int id, ret = -EBUSY; 52 int id, ret = -EBUSY;
52 53
54 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
55 return -EBUSY;
56
53 /* allocate free identifier for the packet */ 57 /* allocate free identifier for the packet */
54 id = wl1271_tx_id(wl, skb); 58 id = wl1271_tx_id(wl, skb);
55 if (id < 0) 59 if (id < 0)
@@ -82,7 +86,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
82 return ret; 86 return ret;
83} 87}
84 88
85static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 89static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
86 u32 extra, struct ieee80211_tx_info *control) 90 u32 extra, struct ieee80211_tx_info *control)
87{ 91{
88 struct timespec ts; 92 struct timespec ts;
@@ -110,9 +114,9 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
110 /* configure the tx attributes */ 114 /* configure the tx attributes */
111 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 115 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
112 116
113 /* queue */ 117 /* queue (we use same identifiers for tid's and ac's */
114 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 118 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
115 desc->tid = wl1271_tx_ac_to_tid(ac); 119 desc->tid = ac;
116 120
117 desc->aid = TX_HW_DEFAULT_AID; 121 desc->aid = TX_HW_DEFAULT_AID;
118 desc->reserved = 0; 122 desc->reserved = 0;
@@ -133,59 +137,17 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
133 desc->tx_attr = cpu_to_le16(tx_attr); 137 desc->tx_attr = cpu_to_le16(tx_attr);
134 138
135 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 139 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
136 return 0;
137}
138
139static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
140 struct ieee80211_tx_info *control)
141{
142
143 struct wl1271_tx_hw_descr *desc;
144 int len;
145
146 /* FIXME: This is a workaround for getting non-aligned packets.
147 This happens at least with EAPOL packets from the user space.
148 Our DMA requires packets to be aligned on a 4-byte boundary.
149 */
150 if (unlikely((long)skb->data & 0x03)) {
151 int offset = (4 - (long)skb->data) & 0x03;
152 wl1271_debug(DEBUG_TX, "skb offset %d", offset);
153
154 /* check whether the current skb can be used */
155 if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
156 unsigned char *src = skb->data;
157
158 /* align the buffer on a 4-byte boundary */
159 skb_reserve(skb, offset);
160 memmove(skb->data, src, skb->len);
161 } else {
162 wl1271_info("No handler, fixme!");
163 return -EINVAL;
164 }
165 }
166
167 len = WL1271_TX_ALIGN(skb->len);
168
169 /* perform a fixed address block write with the packet */
170 wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
171
172 /* write packet new counter into the write access register */
173 wl->tx_packets_count++;
174
175 desc = (struct wl1271_tx_hw_descr *) skb->data;
176 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
177 desc->id, skb, len, desc->length);
178
179 return 0;
180} 140}
181 141
182/* caller must hold wl->mutex */ 142/* caller must hold wl->mutex */
183static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb) 143static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
144 u32 buf_offset)
184{ 145{
185 struct ieee80211_tx_info *info; 146 struct ieee80211_tx_info *info;
186 u32 extra = 0; 147 u32 extra = 0;
187 int ret = 0; 148 int ret = 0;
188 u8 idx; 149 u8 idx;
150 u32 total_len;
189 151
190 if (!skb) 152 if (!skb)
191 return -EINVAL; 153 return -EINVAL;
@@ -208,19 +170,22 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
208 } 170 }
209 } 171 }
210 172
211 ret = wl1271_tx_allocate(wl, skb, extra); 173 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset);
212 if (ret < 0) 174 if (ret < 0)
213 return ret; 175 return ret;
214 176
215 ret = wl1271_tx_fill_hdr(wl, skb, extra, info); 177 wl1271_tx_fill_hdr(wl, skb, extra, info);
216 if (ret < 0)
217 return ret;
218 178
219 ret = wl1271_tx_send_packet(wl, skb, info); 179 /*
220 if (ret < 0) 180 * The length of each packet is stored in terms of words. Thus, we must
221 return ret; 181 * pad the skb data to make sure its length is aligned.
182 * The number of padding bytes is computed and set in wl1271_tx_fill_hdr
183 */
184 total_len = WL1271_TX_ALIGN(skb->len);
185 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
186 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
222 187
223 return ret; 188 return total_len;
224} 189}
225 190
226u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set) 191u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
@@ -245,7 +210,7 @@ void wl1271_tx_work(struct work_struct *work)
245 struct sk_buff *skb; 210 struct sk_buff *skb;
246 bool woken_up = false; 211 bool woken_up = false;
247 u32 sta_rates = 0; 212 u32 sta_rates = 0;
248 u32 prev_tx_packets_count; 213 u32 buf_offset;
249 int ret; 214 int ret;
250 215
251 /* check if the rates supported by the AP have changed */ 216 /* check if the rates supported by the AP have changed */
@@ -262,14 +227,15 @@ void wl1271_tx_work(struct work_struct *work)
262 if (unlikely(wl->state == WL1271_STATE_OFF)) 227 if (unlikely(wl->state == WL1271_STATE_OFF))
263 goto out; 228 goto out;
264 229
265 prev_tx_packets_count = wl->tx_packets_count;
266
267 /* if rates have changed, re-configure the rate policy */ 230 /* if rates have changed, re-configure the rate policy */
268 if (unlikely(sta_rates)) { 231 if (unlikely(sta_rates)) {
269 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); 232 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
270 wl1271_acx_rate_policies(wl); 233 wl1271_acx_rate_policies(wl);
271 } 234 }
272 235
236 /* Prepare the transfer buffer, by aggregating all
237 * available packets */
238 buf_offset = 0;
273 while ((skb = skb_dequeue(&wl->tx_queue))) { 239 while ((skb = skb_dequeue(&wl->tx_queue))) {
274 if (!woken_up) { 240 if (!woken_up) {
275 ret = wl1271_ps_elp_wakeup(wl, false); 241 ret = wl1271_ps_elp_wakeup(wl, false);
@@ -278,21 +244,30 @@ void wl1271_tx_work(struct work_struct *work)
278 woken_up = true; 244 woken_up = true;
279 } 245 }
280 246
281 ret = wl1271_tx_frame(wl, skb); 247 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
282 if (ret == -EBUSY) { 248 if (ret == -EBUSY) {
283 /* firmware buffer is full, lets stop transmitting. */ 249 /*
250 * Either the firmware buffer is full, or the
251 * aggregation buffer is.
252 * Queue back last skb, and stop aggregating.
253 */
284 skb_queue_head(&wl->tx_queue, skb); 254 skb_queue_head(&wl->tx_queue, skb);
285 goto out_ack; 255 goto out_ack;
286 } else if (ret < 0) { 256 } else if (ret < 0) {
287 dev_kfree_skb(skb); 257 dev_kfree_skb(skb);
288 goto out_ack; 258 goto out_ack;
289 } 259 }
260 buf_offset += ret;
261 wl->tx_packets_count++;
290 } 262 }
291 263
292out_ack: 264out_ack:
293 /* interrupt the firmware with the new packets */ 265 if (buf_offset) {
294 if (prev_tx_packets_count != wl->tx_packets_count) 266 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
267 buf_offset, true);
268 /* interrupt the firmware with the new packets */
295 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 269 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
270 }
296 271
297out: 272out:
298 if (woken_up) 273 if (woken_up)
@@ -422,8 +397,6 @@ void wl1271_tx_reset(struct wl1271 *wl)
422 struct sk_buff *skb; 397 struct sk_buff *skb;
423 398
424 /* TX failure */ 399 /* TX failure */
425/* control->flags = 0; FIXME */
426
427 while ((skb = skb_dequeue(&wl->tx_queue))) { 400 while ((skb = skb_dequeue(&wl->tx_queue))) {
428 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 401 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
429 ieee80211_tx_status(wl->hw, skb); 402 ieee80211_tx_status(wl->hw, skb);
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 48bf92621c03..d12a129ad11c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -139,23 +139,6 @@ static inline int wl1271_tx_get_queue(int queue)
139 } 139 }
140} 140}
141 141
142/* wl1271 tx descriptor needs the tid and we need to convert it from ac */
143static inline int wl1271_tx_ac_to_tid(int ac)
144{
145 switch (ac) {
146 case 0:
147 return 0;
148 case 1:
149 return 2;
150 case 2:
151 return 4;
152 case 3:
153 return 6;
154 default:
155 return 0;
156 }
157}
158
159void wl1271_tx_work(struct work_struct *work); 142void wl1271_tx_work(struct work_struct *work);
160void wl1271_tx_complete(struct wl1271 *wl); 143void wl1271_tx_complete(struct wl1271 *wl);
161void wl1271_tx_reset(struct wl1271 *wl); 144void wl1271_tx_reset(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
new file mode 100644
index 000000000000..973b11060a8f
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -0,0 +1,28 @@
1#include <linux/module.h>
2#include <linux/err.h>
3#include <linux/wl12xx.h>
4
5static const struct wl12xx_platform_data *platform_data;
6
7int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
8{
9 if (platform_data)
10 return -EBUSY;
11 if (!data)
12 return -EINVAL;
13
14 platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
15 if (!platform_data)
16 return -ENOMEM;
17
18 return 0;
19}
20
21const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
22{
23 if (!platform_data)
24 return ERR_PTR(-ENODEV);
25
26 return platform_data;
27}
28EXPORT_SYMBOL(wl12xx_get_platform_data);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 788a9bc1dbac..630fb8664768 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -135,7 +135,7 @@ static void skb_entry_set_link(union skb_entry *list, unsigned short id)
135static int skb_entry_is_link(const union skb_entry *list) 135static int skb_entry_is_link(const union skb_entry *list)
136{ 136{
137 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); 137 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
138 return ((unsigned long)list->skb < PAGE_OFFSET); 138 return (unsigned long)list->skb < PAGE_OFFSET;
139} 139}
140 140
141/* 141/*
@@ -203,8 +203,8 @@ static void rx_refill_timeout(unsigned long data)
203 203
204static int netfront_tx_slot_available(struct netfront_info *np) 204static int netfront_tx_slot_available(struct netfront_info *np)
205{ 205{
206 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < 206 return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
207 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); 207 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
208} 208}
209 209
210static void xennet_maybe_wake_tx(struct net_device *dev) 210static void xennet_maybe_wake_tx(struct net_device *dev)
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a9352b2c7ac4..b7e755f4178a 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144
145static void end_sync(void)
146{
147 end_cpu_work();
148 /* make sure we don't leak task structs */
149 process_task_mortuary();
150 process_task_mortuary();
151}
152
153
154int sync_start(void) 144int sync_start(void)
155{ 145{
156 int err; 146 int err;
@@ -158,7 +148,7 @@ int sync_start(void)
158 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
159 return -ENOMEM; 149 return -ENOMEM;
160 150
161 start_cpu_work(); 151 mutex_lock(&buffer_mutex);
162 152
163 err = task_handoff_register(&task_free_nb); 153 err = task_handoff_register(&task_free_nb);
164 if (err) 154 if (err)
@@ -173,7 +163,10 @@ int sync_start(void)
173 if (err) 163 if (err)
174 goto out4; 164 goto out4;
175 165
166 start_cpu_work();
167
176out: 168out:
169 mutex_unlock(&buffer_mutex);
177 return err; 170 return err;
178out4: 171out4:
179 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -182,7 +175,6 @@ out3:
182out2: 175out2:
183 task_handoff_unregister(&task_free_nb); 176 task_handoff_unregister(&task_free_nb);
184out1: 177out1:
185 end_sync();
186 free_cpumask_var(marked_cpus); 178 free_cpumask_var(marked_cpus);
187 goto out; 179 goto out;
188} 180}
@@ -190,11 +182,20 @@ out1:
190 182
191void sync_stop(void) 183void sync_stop(void)
192{ 184{
185 /* flush buffers */
186 mutex_lock(&buffer_mutex);
187 end_cpu_work();
193 unregister_module_notifier(&module_load_nb); 188 unregister_module_notifier(&module_load_nb);
194 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 189 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
195 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 190 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
196 task_handoff_unregister(&task_free_nb); 191 task_handoff_unregister(&task_free_nb);
197 end_sync(); 192 mutex_unlock(&buffer_mutex);
193 flush_scheduled_work();
194
195 /* make sure we don't leak task structs */
196 process_task_mortuary();
197 process_task_mortuary();
198
198 free_cpumask_var(marked_cpus); 199 free_cpumask_var(marked_cpus);
199} 200}
200 201
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 219f79e2210a..f179ac2ea801 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -120,8 +120,6 @@ void end_cpu_work(void)
120 120
121 cancel_delayed_work(&b->work); 121 cancel_delayed_work(&b->work);
122 } 122 }
123
124 flush_scheduled_work();
125} 123}
126 124
127/* 125/*
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3ceebb5be84..4789f8e8bf7a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -71,6 +71,49 @@
71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) 71#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) 72#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
73 73
74/* page table handling */
75#define LEVEL_STRIDE (9)
76#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
77
78static inline int agaw_to_level(int agaw)
79{
80 return agaw + 2;
81}
82
83static inline int agaw_to_width(int agaw)
84{
85 return 30 + agaw * LEVEL_STRIDE;
86}
87
88static inline int width_to_agaw(int width)
89{
90 return (width - 30) / LEVEL_STRIDE;
91}
92
93static inline unsigned int level_to_offset_bits(int level)
94{
95 return (level - 1) * LEVEL_STRIDE;
96}
97
98static inline int pfn_level_offset(unsigned long pfn, int level)
99{
100 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
101}
102
103static inline unsigned long level_mask(int level)
104{
105 return -1UL << level_to_offset_bits(level);
106}
107
108static inline unsigned long level_size(int level)
109{
110 return 1UL << level_to_offset_bits(level);
111}
112
113static inline unsigned long align_to_level(unsigned long pfn, int level)
114{
115 return (pfn + level_size(level) - 1) & level_mask(level);
116}
74 117
75/* VT-d pages must always be _smaller_ than MM pages. Otherwise things 118/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
76 are never going to work. */ 119 are never going to work. */
@@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova)
434} 477}
435 478
436 479
437static inline int width_to_agaw(int width);
438
439static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) 480static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
440{ 481{
441 unsigned long sagaw; 482 unsigned long sagaw;
@@ -646,51 +687,6 @@ out:
646 spin_unlock_irqrestore(&iommu->lock, flags); 687 spin_unlock_irqrestore(&iommu->lock, flags);
647} 688}
648 689
649/* page table handling */
650#define LEVEL_STRIDE (9)
651#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
652
653static inline int agaw_to_level(int agaw)
654{
655 return agaw + 2;
656}
657
658static inline int agaw_to_width(int agaw)
659{
660 return 30 + agaw * LEVEL_STRIDE;
661
662}
663
664static inline int width_to_agaw(int width)
665{
666 return (width - 30) / LEVEL_STRIDE;
667}
668
669static inline unsigned int level_to_offset_bits(int level)
670{
671 return (level - 1) * LEVEL_STRIDE;
672}
673
674static inline int pfn_level_offset(unsigned long pfn, int level)
675{
676 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
677}
678
679static inline unsigned long level_mask(int level)
680{
681 return -1UL << level_to_offset_bits(level);
682}
683
684static inline unsigned long level_size(int level)
685{
686 return 1UL << level_to_offset_bits(level);
687}
688
689static inline unsigned long align_to_level(unsigned long pfn, int level)
690{
691 return (pfn + level_size(level) - 1) & level_mask(level);
692}
693
694static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 690static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
695 unsigned long pfn) 691 unsigned long pfn)
696{ 692{
@@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3761 3757
3762DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3763 3759
3760#define GGC 0x52
3761#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3762#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3763#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3764#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3765#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3766#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3767#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3768#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3769
3770static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3771{
3772 unsigned short ggc;
3773
3774 if (pci_read_config_word(dev, GGC, &ggc))
3775 return;
3776
3777 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3778 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3779 dmar_map_gfx = 0;
3780 }
3781}
3782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3786
3764/* On Tylersburg chipsets, some BIOSes have been known to enable the 3787/* On Tylersburg chipsets, some BIOSes have been known to enable the
3765 ISOCH DMAR unit for the Azalia sound device, but not give it any 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any
3766 TLB entries, which causes it to deadlock. Check for that. We do 3789 TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d9..553d8ee55c1c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
608 * the VF BAR size multiplied by the number of VFs. The alignment 608 * the VF BAR size multiplied by the number of VFs. The alignment
609 * is just the VF BAR size. 609 * is just the VF BAR size.
610 */ 610 */
611int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 611resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
612{ 612{
613 struct resource tmp; 613 struct resource tmp;
614 enum pci_bar_type type; 614 enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7754a678ab15..6beb11b617a9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
264extern void pci_iov_release(struct pci_dev *dev); 264extern void pci_iov_release(struct pci_dev *dev);
265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
266 enum pci_bar_type *type); 266 enum pci_bar_type *type);
267extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 267extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
268 int resno);
268extern void pci_restore_iov_state(struct pci_dev *dev); 269extern void pci_restore_iov_state(struct pci_dev *dev);
269extern int pci_iov_bus_range(struct pci_bus *bus); 270extern int pci_iov_bus_range(struct pci_bus *bus);
270 271
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
320} 321}
321#endif /* CONFIG_PCI_IOV */ 322#endif /* CONFIG_PCI_IOV */
322 323
323static inline int pci_resource_alignment(struct pci_dev *dev, 324static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
324 struct resource *res) 325 struct resource *res)
325{ 326{
326#ifdef CONFIG_PCI_IOV 327#ifdef CONFIG_PCI_IOV
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 89ed181cd90c..857ae01734a6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); 163DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
164 164
165/* 165/*
166 * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
167 * for some HT machines to use C4 w/o hanging.
168 */
169static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev)
170{
171 u32 pmbase;
172 u16 pm1a;
173
174 pci_read_config_dword(dev, 0x40, &pmbase);
175 pmbase = pmbase & 0xff80;
176 pm1a = inw(pmbase);
177
178 if (pm1a & 0x10) {
179 dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
180 outw(0x10, pmbase);
181 }
182}
183DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
184
185/*
166 * Chipsets where PCI->PCI transfers vanish or hang 186 * Chipsets where PCI->PCI transfers vanish or hang
167 */ 187 */
168static void __devinit quirk_nopcipci(struct pci_dev *dev) 188static void __devinit quirk_nopcipci(struct pci_dev *dev)
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 54aa1c238cb3..9ba4dade69a4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -163,7 +163,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
163 c = p_dev->function_config; 163 c = p_dev->function_config;
164 164
165 if (!(c->state & CONFIG_LOCKED)) { 165 if (!(c->state & CONFIG_LOCKED)) {
166 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 166 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
167 mutex_unlock(&s->ops_mutex); 167 mutex_unlock(&s->ops_mutex);
168 return -EACCES; 168 return -EACCES;
169 } 169 }
@@ -220,7 +220,7 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
220 s->win[w].card_start = offset; 220 s->win[w].card_start = offset;
221 ret = s->ops->set_mem_map(s, &s->win[w]); 221 ret = s->ops->set_mem_map(s, &s->win[w]);
222 if (ret) 222 if (ret)
223 dev_warn(&s->dev, "failed to set_mem_map\n"); 223 dev_warn(&p_dev->dev, "failed to set_mem_map\n");
224 mutex_unlock(&s->ops_mutex); 224 mutex_unlock(&s->ops_mutex);
225 return ret; 225 return ret;
226} /* pcmcia_map_mem_page */ 226} /* pcmcia_map_mem_page */
@@ -244,18 +244,18 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
244 c = p_dev->function_config; 244 c = p_dev->function_config;
245 245
246 if (!(s->state & SOCKET_PRESENT)) { 246 if (!(s->state & SOCKET_PRESENT)) {
247 dev_dbg(&s->dev, "No card present\n"); 247 dev_dbg(&p_dev->dev, "No card present\n");
248 ret = -ENODEV; 248 ret = -ENODEV;
249 goto unlock; 249 goto unlock;
250 } 250 }
251 if (!(c->state & CONFIG_LOCKED)) { 251 if (!(c->state & CONFIG_LOCKED)) {
252 dev_dbg(&s->dev, "Configuration isnt't locked\n"); 252 dev_dbg(&p_dev->dev, "Configuration isnt't locked\n");
253 ret = -EACCES; 253 ret = -EACCES;
254 goto unlock; 254 goto unlock;
255 } 255 }
256 256
257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { 257 if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) {
258 dev_dbg(&s->dev, 258 dev_dbg(&p_dev->dev,
259 "changing Vcc or IRQ is not allowed at this time\n"); 259 "changing Vcc or IRQ is not allowed at this time\n");
260 ret = -EINVAL; 260 ret = -EINVAL;
261 goto unlock; 261 goto unlock;
@@ -265,20 +265,22 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 265 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 266 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
267 if (mod->Vpp1 != mod->Vpp2) { 267 if (mod->Vpp1 != mod->Vpp2) {
268 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); 268 dev_dbg(&p_dev->dev,
269 "Vpp1 and Vpp2 must be the same\n");
269 ret = -EINVAL; 270 ret = -EINVAL;
270 goto unlock; 271 goto unlock;
271 } 272 }
272 s->socket.Vpp = mod->Vpp1; 273 s->socket.Vpp = mod->Vpp1;
273 if (s->ops->set_socket(s, &s->socket)) { 274 if (s->ops->set_socket(s, &s->socket)) {
274 dev_printk(KERN_WARNING, &s->dev, 275 dev_printk(KERN_WARNING, &p_dev->dev,
275 "Unable to set VPP\n"); 276 "Unable to set VPP\n");
276 ret = -EIO; 277 ret = -EIO;
277 goto unlock; 278 goto unlock;
278 } 279 }
279 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 280 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
280 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 281 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
281 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); 282 dev_dbg(&p_dev->dev,
283 "changing Vcc is not allowed at this time\n");
282 ret = -EINVAL; 284 ret = -EINVAL;
283 goto unlock; 285 goto unlock;
284 } 286 }
@@ -401,7 +403,7 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res)
401 win = &s->win[w]; 403 win = &s->win[w];
402 404
403 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) { 405 if (!(p_dev->_win & CLIENT_WIN_REQ(w))) {
404 dev_dbg(&s->dev, "not releasing unknown window\n"); 406 dev_dbg(&p_dev->dev, "not releasing unknown window\n");
405 mutex_unlock(&s->ops_mutex); 407 mutex_unlock(&s->ops_mutex);
406 return -EINVAL; 408 return -EINVAL;
407 } 409 }
@@ -439,7 +441,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
439 return -ENODEV; 441 return -ENODEV;
440 442
441 if (req->IntType & INT_CARDBUS) { 443 if (req->IntType & INT_CARDBUS) {
442 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n"); 444 dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n");
443 return -EINVAL; 445 return -EINVAL;
444 } 446 }
445 447
@@ -447,7 +449,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
447 c = p_dev->function_config; 449 c = p_dev->function_config;
448 if (c->state & CONFIG_LOCKED) { 450 if (c->state & CONFIG_LOCKED) {
449 mutex_unlock(&s->ops_mutex); 451 mutex_unlock(&s->ops_mutex);
450 dev_dbg(&s->dev, "Configuration is locked\n"); 452 dev_dbg(&p_dev->dev, "Configuration is locked\n");
451 return -EACCES; 453 return -EACCES;
452 } 454 }
453 455
@@ -455,7 +457,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
455 s->socket.Vpp = req->Vpp; 457 s->socket.Vpp = req->Vpp;
456 if (s->ops->set_socket(s, &s->socket)) { 458 if (s->ops->set_socket(s, &s->socket)) {
457 mutex_unlock(&s->ops_mutex); 459 mutex_unlock(&s->ops_mutex);
458 dev_printk(KERN_WARNING, &s->dev, 460 dev_printk(KERN_WARNING, &p_dev->dev,
459 "Unable to set socket state\n"); 461 "Unable to set socket state\n");
460 return -EINVAL; 462 return -EINVAL;
461 } 463 }
@@ -569,19 +571,20 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
569 int ret = -EINVAL; 571 int ret = -EINVAL;
570 572
571 mutex_lock(&s->ops_mutex); 573 mutex_lock(&s->ops_mutex);
572 dev_dbg(&s->dev, "pcmcia_request_io: %pR , %pR", &c->io[0], &c->io[1]); 574 dev_dbg(&p_dev->dev, "pcmcia_request_io: %pR , %pR",
575 &c->io[0], &c->io[1]);
573 576
574 if (!(s->state & SOCKET_PRESENT)) { 577 if (!(s->state & SOCKET_PRESENT)) {
575 dev_dbg(&s->dev, "pcmcia_request_io: No card present\n"); 578 dev_dbg(&p_dev->dev, "pcmcia_request_io: No card present\n");
576 goto out; 579 goto out;
577 } 580 }
578 581
579 if (c->state & CONFIG_LOCKED) { 582 if (c->state & CONFIG_LOCKED) {
580 dev_dbg(&s->dev, "Configuration is locked\n"); 583 dev_dbg(&p_dev->dev, "Configuration is locked\n");
581 goto out; 584 goto out;
582 } 585 }
583 if (c->state & CONFIG_IO_REQ) { 586 if (c->state & CONFIG_IO_REQ) {
584 dev_dbg(&s->dev, "IO already configured\n"); 587 dev_dbg(&p_dev->dev, "IO already configured\n");
585 goto out; 588 goto out;
586 } 589 }
587 590
@@ -592,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
592 if (c->io[1].end) { 595 if (c->io[1].end) {
593 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
594 if (ret) { 597 if (ret) {
598 struct resource tmp = c->io[0];
599 /* release the previously allocated resource */
595 release_io_space(s, &c->io[0]); 600 release_io_space(s, &c->io[0]);
601 /* but preserve the settings, for they worked... */
602 c->io[0].end = resource_size(&tmp);
603 c->io[0].start = tmp.start;
604 c->io[0].flags = tmp.flags;
596 goto out; 605 goto out;
597 } 606 }
598 } else 607 } else
@@ -601,7 +610,7 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
601 c->state |= CONFIG_IO_REQ; 610 c->state |= CONFIG_IO_REQ;
602 p_dev->_io = 1; 611 p_dev->_io = 1;
603 612
604 dev_dbg(&s->dev, "pcmcia_request_io succeeded: %pR , %pR", 613 dev_dbg(&p_dev->dev, "pcmcia_request_io succeeded: %pR , %pR",
605 &c->io[0], &c->io[1]); 614 &c->io[0], &c->io[1]);
606out: 615out:
607 mutex_unlock(&s->ops_mutex); 616 mutex_unlock(&s->ops_mutex);
@@ -800,7 +809,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
800 int w; 809 int w;
801 810
802 if (!(s->state & SOCKET_PRESENT)) { 811 if (!(s->state & SOCKET_PRESENT)) {
803 dev_dbg(&s->dev, "No card present\n"); 812 dev_dbg(&p_dev->dev, "No card present\n");
804 return -ENODEV; 813 return -ENODEV;
805 } 814 }
806 815
@@ -809,12 +818,12 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
809 req->Size = s->map_size; 818 req->Size = s->map_size;
810 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; 819 align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size;
811 if (req->Size & (s->map_size-1)) { 820 if (req->Size & (s->map_size-1)) {
812 dev_dbg(&s->dev, "invalid map size\n"); 821 dev_dbg(&p_dev->dev, "invalid map size\n");
813 return -EINVAL; 822 return -EINVAL;
814 } 823 }
815 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || 824 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
816 (req->Base & (align-1))) { 825 (req->Base & (align-1))) {
817 dev_dbg(&s->dev, "invalid base address\n"); 826 dev_dbg(&p_dev->dev, "invalid base address\n");
818 return -EINVAL; 827 return -EINVAL;
819 } 828 }
820 if (req->Base) 829 if (req->Base)
@@ -826,7 +835,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
826 if (!(s->state & SOCKET_WIN_REQ(w))) 835 if (!(s->state & SOCKET_WIN_REQ(w)))
827 break; 836 break;
828 if (w == MAX_WIN) { 837 if (w == MAX_WIN) {
829 dev_dbg(&s->dev, "all windows are used already\n"); 838 dev_dbg(&p_dev->dev, "all windows are used already\n");
830 mutex_unlock(&s->ops_mutex); 839 mutex_unlock(&s->ops_mutex);
831 return -EINVAL; 840 return -EINVAL;
832 } 841 }
@@ -837,7 +846,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
837 win->res = pcmcia_find_mem_region(req->Base, req->Size, align, 846 win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
838 0, s); 847 0, s);
839 if (!win->res) { 848 if (!win->res) {
840 dev_dbg(&s->dev, "allocating mem region failed\n"); 849 dev_dbg(&p_dev->dev, "allocating mem region failed\n");
841 mutex_unlock(&s->ops_mutex); 850 mutex_unlock(&s->ops_mutex);
842 return -EINVAL; 851 return -EINVAL;
843 } 852 }
@@ -851,7 +860,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
851 win->card_start = 0; 860 win->card_start = 0;
852 861
853 if (s->ops->set_mem_map(s, win) != 0) { 862 if (s->ops->set_mem_map(s, win) != 0) {
854 dev_dbg(&s->dev, "failed to set memory mapping\n"); 863 dev_dbg(&p_dev->dev, "failed to set memory mapping\n");
855 mutex_unlock(&s->ops_mutex); 864 mutex_unlock(&s->ops_mutex);
856 return -EIO; 865 return -EIO;
857 } 866 }
@@ -874,7 +883,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha
874 if (win->res) 883 if (win->res)
875 request_resource(&iomem_resource, res); 884 request_resource(&iomem_resource, res);
876 885
877 dev_dbg(&s->dev, "request_window results in %pR\n", res); 886 dev_dbg(&p_dev->dev, "request_window results in %pR\n", res);
878 887
879 mutex_unlock(&s->ops_mutex); 888 mutex_unlock(&s->ops_mutex);
880 *wh = res; 889 *wh = res;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869af0f44..deef6656ab7b 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
646 if (!pci_resource_start(dev, 0)) { 646 if (!pci_resource_start(dev, 0)) {
647 dev_warn(&dev->dev, "refusing to load the driver as the " 647 dev_warn(&dev->dev, "refusing to load the driver as the "
648 "io_base is NULL.\n"); 648 "io_base is NULL.\n");
649 goto err_out_free_mem; 649 goto err_out_disable;
650 } 650 }
651 651
652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed128bdef..2d61186ad5a2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ 3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3094}; 3094};
3095 3095
3096typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; 3096typedef u16 tpacpi_keymap_entry_t;
3097typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
3097 3098
3098static int __init hotkey_init(struct ibm_init_struct *iibm) 3099static int __init hotkey_init(struct ibm_init_struct *iibm)
3099{ 3100{
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3230 }; 3231 };
3231 3232
3232#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) 3233#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
3233#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) 3234#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
3234 3235
3235 int res, i; 3236 int res, i;
3236 int status; 3237 int status;
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae560fa1..dc628cb2e762 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; 233 empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; 234 now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; 235 avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
236 break;
236 case SOURCE_VOLTAGE: 237 case SOURCE_VOLTAGE:
237 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; 238 full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
238 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; 239 empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index c61ffec2ff10..2a10cd361181 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -185,8 +185,8 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
185{ 185{
186 u32 data[3]; 186 u32 data[3];
187 u8 *p = (u8 *)&data[1]; 187 u8 *p = (u8 *)&data[1];
188 int err = intel_scu_ipc_command(IPC_CMD_BATTERY_PROPERTY, 188 int err = intel_scu_ipc_command(IPCMSG_BATTERY,
189 IPCMSG_BATTERY, NULL, 0, data, 3); 189 IPC_CMD_BATTERY_PROPERTY, NULL, 0, data, 3);
190 190
191 prop->capacity = data[0]; 191 prop->capacity = data[0];
192 prop->crnt = *p++; 192 prop->crnt = *p++;
@@ -207,7 +207,7 @@ static int pmic_scu_ipc_battery_property_get(struct battery_property *prop)
207 207
208static int pmic_scu_ipc_set_charger(int charger) 208static int pmic_scu_ipc_set_charger(int charger)
209{ 209{
210 return intel_scu_ipc_simple_command(charger, IPCMSG_BATTERY); 210 return intel_scu_ipc_simple_command(IPCMSG_BATTERY, charger);
211} 211}
212 212
213/** 213/**
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 7d149a8d8d9b..2ce2eb71d0f5 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -215,7 +215,7 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); 215 struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
216 int ret = -EINVAL; 216 int ret = -EINVAL;
217 217
218 if (info->vol_table && (index < (2 << info->vol_nbits))) { 218 if (info->vol_table && (index < (1 << info->vol_nbits))) {
219 ret = info->vol_table[index]; 219 ret = info->vol_table[index];
220 if (info->slope_double) 220 if (info->slope_double)
221 ret <<= 1; 221 ret <<= 1;
@@ -233,7 +233,7 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
233 max_uV = max_uV >> 1; 233 max_uV = max_uV >> 1;
234 } 234 }
235 if (info->vol_table) { 235 if (info->vol_table) {
236 for (i = 0; i < (2 << info->vol_nbits); i++) { 236 for (i = 0; i < (1 << info->vol_nbits); i++) {
237 if (!info->vol_table[i]) 237 if (!info->vol_table[i])
238 break; 238 break;
239 if ((min_uV <= info->vol_table[i]) 239 if ((min_uV <= info->vol_table[i])
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 11790990277a..b349266a43de 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,12 +634,9 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
634 "%s: failed to register regulator %s err %d\n", 634 "%s: failed to register regulator %s err %d\n",
635 __func__, ab3100_regulator_desc[i].name, 635 __func__, ab3100_regulator_desc[i].name,
636 err); 636 err);
637 i--;
638 /* remove the already registered regulators */ 637 /* remove the already registered regulators */
639 while (i > 0) { 638 while (--i >= 0)
640 regulator_unregister(ab3100_regulators[i].rdev); 639 regulator_unregister(ab3100_regulators[i].rdev);
641 i--;
642 }
643 return err; 640 return err;
644 } 641 }
645 642
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index dc3f1a491675..28c7ae67cec9 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -157,7 +157,7 @@ static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
157 if (info->fixed_uV) 157 if (info->fixed_uV)
158 return info->fixed_uV; 158 return info->fixed_uV;
159 159
160 if (selector > info->voltages_len) 160 if (selector >= info->voltages_len)
161 return -EINVAL; 161 return -EINVAL;
162 162
163 return info->supported_voltages[selector]; 163 return info->supported_voltages[selector];
@@ -344,13 +344,14 @@ static inline struct ab8500_regulator_info *find_regulator_info(int id)
344static __devinit int ab8500_regulator_probe(struct platform_device *pdev) 344static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
345{ 345{
346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent); 346 struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
347 struct ab8500_platform_data *pdata = dev_get_platdata(ab8500->dev); 347 struct ab8500_platform_data *pdata;
348 int i, err; 348 int i, err;
349 349
350 if (!ab8500) { 350 if (!ab8500) {
351 dev_err(&pdev->dev, "null mfd parent\n"); 351 dev_err(&pdev->dev, "null mfd parent\n");
352 return -EINVAL; 352 return -EINVAL;
353 } 353 }
354 pdata = dev_get_platdata(ab8500->dev);
354 355
355 /* register all regulators */ 356 /* register all regulators */
356 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) { 357 for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -368,11 +369,9 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
368 dev_err(&pdev->dev, "failed to register regulator %s\n", 369 dev_err(&pdev->dev, "failed to register regulator %s\n",
369 info->desc.name); 370 info->desc.name);
370 /* when we fail, un-register all earlier regulators */ 371 /* when we fail, un-register all earlier regulators */
371 i--; 372 while (--i >= 0) {
372 while (i > 0) {
373 info = &ab8500_regulator_info[i]; 373 info = &ab8500_regulator_info[i];
374 regulator_unregister(info->regulator); 374 regulator_unregister(info->regulator);
375 i--;
376 } 375 }
377 return err; 376 return err;
378 } 377 }
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index d59d2f2314af..df1fb53c09d2 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -25,7 +25,7 @@ struct ad5398_chip_info {
25 unsigned int current_level; 25 unsigned int current_level;
26 unsigned int current_mask; 26 unsigned int current_mask;
27 unsigned int current_offset; 27 unsigned int current_offset;
28 struct regulator_dev rdev; 28 struct regulator_dev *rdev;
29}; 29};
30 30
31static int ad5398_calc_current(struct ad5398_chip_info *chip, 31static int ad5398_calc_current(struct ad5398_chip_info *chip,
@@ -211,7 +211,6 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
211static int __devinit ad5398_probe(struct i2c_client *client, 211static int __devinit ad5398_probe(struct i2c_client *client,
212 const struct i2c_device_id *id) 212 const struct i2c_device_id *id)
213{ 213{
214 struct regulator_dev *rdev;
215 struct regulator_init_data *init_data = client->dev.platform_data; 214 struct regulator_init_data *init_data = client->dev.platform_data;
216 struct ad5398_chip_info *chip; 215 struct ad5398_chip_info *chip;
217 const struct ad5398_current_data_format *df = 216 const struct ad5398_current_data_format *df =
@@ -233,9 +232,10 @@ static int __devinit ad5398_probe(struct i2c_client *client,
233 chip->current_offset = df->current_offset; 232 chip->current_offset = df->current_offset;
234 chip->current_mask = (chip->current_level - 1) << chip->current_offset; 233 chip->current_mask = (chip->current_level - 1) << chip->current_offset;
235 234
236 rdev = regulator_register(&ad5398_reg, &client->dev, init_data, chip); 235 chip->rdev = regulator_register(&ad5398_reg, &client->dev,
237 if (IS_ERR(rdev)) { 236 init_data, chip);
238 ret = PTR_ERR(rdev); 237 if (IS_ERR(chip->rdev)) {
238 ret = PTR_ERR(chip->rdev);
239 dev_err(&client->dev, "failed to register %s %s\n", 239 dev_err(&client->dev, "failed to register %s %s\n",
240 id->name, ad5398_reg.name); 240 id->name, ad5398_reg.name);
241 goto err; 241 goto err;
@@ -254,7 +254,7 @@ static int __devexit ad5398_remove(struct i2c_client *client)
254{ 254{
255 struct ad5398_chip_info *chip = i2c_get_clientdata(client); 255 struct ad5398_chip_info *chip = i2c_get_clientdata(client);
256 256
257 regulator_unregister(&chip->rdev); 257 regulator_unregister(chip->rdev);
258 kfree(chip); 258 kfree(chip);
259 i2c_set_clientdata(client, NULL); 259 i2c_set_clientdata(client, NULL);
260 260
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 422a709d271d..cc8b337b9119 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev)
700 constraints->min_uA != constraints->max_uA) { 700 constraints->min_uA != constraints->max_uA) {
701 ret = _regulator_get_current_limit(rdev); 701 ret = _regulator_get_current_limit(rdev);
702 if (ret > 0) 702 if (ret > 0)
703 count += sprintf(buf + count, "at %d uA ", ret / 1000); 703 count += sprintf(buf + count, "at %d mA ", ret / 1000);
704 } 704 }
705 705
706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) 706 if (constraints->valid_modes_mask & REGULATOR_MODE_FAST)
@@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2302 dev_set_name(&rdev->dev, "regulator.%d", 2302 dev_set_name(&rdev->dev, "regulator.%d",
2303 atomic_inc_return(&regulator_no) - 1); 2303 atomic_inc_return(&regulator_no) - 1);
2304 ret = device_register(&rdev->dev); 2304 ret = device_register(&rdev->dev);
2305 if (ret != 0) 2305 if (ret != 0) {
2306 put_device(&rdev->dev);
2306 goto clean; 2307 goto clean;
2308 }
2307 2309
2308 dev_set_drvdata(&rdev->dev, rdev); 2310 dev_set_drvdata(&rdev->dev, rdev);
2309 2311
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e49d2bd393f2..d61ecb885a8c 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -165,7 +165,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
165 mutex_init(&pmic->mtx); 165 mutex_init(&pmic->mtx);
166 166
167 for (i = 0; i < 3; i++) { 167 for (i = 0; i < 3; i++) {
168 pmic->rdev[i] = regulator_register(&isl_rd[0], &i2c->dev, 168 pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
169 init_data, pmic); 169 init_data, pmic);
170 if (IS_ERR(pmic->rdev[i])) { 170 if (IS_ERR(pmic->rdev[i])) {
171 dev_err(&i2c->dev, "failed to register %s\n", id->name); 171 dev_err(&i2c->dev, "failed to register %s\n", id->name);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 8867c2710a6d..559cfa271a44 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -121,14 +121,14 @@ static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) 121 if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
122 return -EINVAL; 122 return -EINVAL;
123 123
124 if (min_uV >= 3000000)
125 selector = 3;
126 if (min_uV < 3000000)
127 selector = 2;
128 if (min_uV < 2500000)
129 selector = 1;
130 if (min_uV < 1800000) 124 if (min_uV < 1800000)
131 selector = 0; 125 selector = 0;
126 else if (min_uV < 2500000)
127 selector = 1;
128 else if (min_uV < 3000000)
129 selector = 2;
130 else if (min_uV >= 3000000)
131 selector = 3;
132 132
133 if (max1586_v6_calc_voltage(selector) > max_uV) 133 if (max1586_v6_calc_voltage(selector) > max_uV)
134 return -EINVAL; 134 return -EINVAL;
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 4520ace3f7e7..6b60a9c0366b 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
330 /* set external clock frequency */ 330 /* set external clock frequency */
331 info->extclk_freq = pdata->extclk_freq; 331 info->extclk_freq = pdata->extclk_freq;
332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, 332 max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
333 info->extclk_freq); 333 info->extclk_freq << 6);
334 } 334 }
335 335
336 if (pdata->ramp_timing) { 336 if (pdata->ramp_timing) {
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index ab67298799f9..a1baf1fbe004 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -549,7 +549,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
549 if (!max8998) 549 if (!max8998)
550 return -ENOMEM; 550 return -ENOMEM;
551 551
552 size = sizeof(struct regulator_dev *) * (pdata->num_regulators + 1); 552 size = sizeof(struct regulator_dev *) * pdata->num_regulators;
553 max8998->rdev = kzalloc(size, GFP_KERNEL); 553 max8998->rdev = kzalloc(size, GFP_KERNEL);
554 if (!max8998->rdev) { 554 if (!max8998->rdev) {
555 kfree(max8998); 555 kfree(max8998);
@@ -557,7 +557,9 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
557 } 557 }
558 558
559 rdev = max8998->rdev; 559 rdev = max8998->rdev;
560 max8998->dev = &pdev->dev;
560 max8998->iodev = iodev; 561 max8998->iodev = iodev;
562 max8998->num_regulators = pdata->num_regulators;
561 platform_set_drvdata(pdev, max8998); 563 platform_set_drvdata(pdev, max8998);
562 564
563 for (i = 0; i < pdata->num_regulators; i++) { 565 for (i = 0; i < pdata->num_regulators; i++) {
@@ -583,7 +585,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
583 585
584 return 0; 586 return 0;
585err: 587err:
586 for (i = 0; i <= max8998->num_regulators; i++) 588 for (i = 0; i < max8998->num_regulators; i++)
587 if (rdev[i]) 589 if (rdev[i])
588 regulator_unregister(rdev[i]); 590 regulator_unregister(rdev[i]);
589 591
@@ -599,7 +601,7 @@ static int __devexit max8998_pmic_remove(struct platform_device *pdev)
599 struct regulator_dev **rdev = max8998->rdev; 601 struct regulator_dev **rdev = max8998->rdev;
600 int i; 602 int i;
601 603
602 for (i = 0; i <= max8998->num_regulators; i++) 604 for (i = 0; i < max8998->num_regulators; i++)
603 if (rdev[i]) 605 if (rdev[i])
604 regulator_unregister(rdev[i]); 606 regulator_unregister(rdev[i]);
605 607
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index c239f42aa4a3..020f5878d7ff 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -626,12 +626,6 @@ fail:
626 return error; 626 return error;
627} 627}
628 628
629/**
630 * tps6507x_remove - TPS6507x driver i2c remove handler
631 * @client: i2c driver client device structure
632 *
633 * Unregister TPS driver as an i2c client device driver
634 */
635static int __devexit tps6507x_pmic_remove(struct platform_device *pdev) 629static int __devexit tps6507x_pmic_remove(struct platform_device *pdev)
636{ 630{
637 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev); 631 struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 8cff1413a147..51237fbb1bbb 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -133,7 +133,7 @@ static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; 133 mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
134 val = (val & mask) >> ri->volt_shift; 134 val = (val & mask) >> ri->volt_shift;
135 135
136 if (val > ri->desc.n_voltages) 136 if (val >= ri->desc.n_voltages)
137 BUG(); 137 BUG();
138 138
139 return ri->voltages[val] * 1000; 139 return ri->voltages[val] * 1000;
@@ -150,7 +150,7 @@ static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
150 if (ret) 150 if (ret)
151 return ret; 151 return ret;
152 152
153 return tps6586x_set_bits(parent, ri->go_reg, ri->go_bit); 153 return tps6586x_set_bits(parent, ri->go_reg, 1 << ri->go_bit);
154} 154}
155 155
156static int tps6586x_regulator_enable(struct regulator_dev *rdev) 156static int tps6586x_regulator_enable(struct regulator_dev *rdev)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index e686cdb61b97..9edf8f692341 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -215,8 +215,7 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
215 215
216 case REGULATOR_MODE_IDLE: 216 case REGULATOR_MODE_IDLE:
217 ret = wm831x_set_bits(wm831x, ctrl_reg, 217 ret = wm831x_set_bits(wm831x, ctrl_reg,
218 WM831X_LDO1_LP_MODE, 218 WM831X_LDO1_LP_MODE, 0);
219 WM831X_LDO1_LP_MODE);
220 if (ret < 0) 219 if (ret < 0)
221 return ret; 220 return ret;
222 221
@@ -225,10 +224,12 @@ static int wm831x_gp_ldo_set_mode(struct regulator_dev *rdev,
225 WM831X_LDO1_ON_MODE); 224 WM831X_LDO1_ON_MODE);
226 if (ret < 0) 225 if (ret < 0)
227 return ret; 226 return ret;
227 break;
228 228
229 case REGULATOR_MODE_STANDBY: 229 case REGULATOR_MODE_STANDBY:
230 ret = wm831x_set_bits(wm831x, ctrl_reg, 230 ret = wm831x_set_bits(wm831x, ctrl_reg,
231 WM831X_LDO1_LP_MODE, 0); 231 WM831X_LDO1_LP_MODE,
232 WM831X_LDO1_LP_MODE);
232 if (ret < 0) 233 if (ret < 0)
233 return ret; 234 return ret;
234 235
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 0e6ed7db9364..fe4b8a8a9dfd 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1129,7 +1129,7 @@ static unsigned int wm8350_dcdc_get_mode(struct regulator_dev *rdev)
1129 mode = REGULATOR_MODE_NORMAL; 1129 mode = REGULATOR_MODE_NORMAL;
1130 } else if (!active && !sleep) 1130 } else if (!active && !sleep)
1131 mode = REGULATOR_MODE_IDLE; 1131 mode = REGULATOR_MODE_IDLE;
1132 else if (!sleep) 1132 else if (sleep)
1133 mode = REGULATOR_MODE_STANDBY; 1133 mode = REGULATOR_MODE_STANDBY;
1134 1134
1135 return mode; 1135 return mode;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index d26780ea254b..261a07e0fb24 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
235 err = PTR_ERR(rtc); 235 err = PTR_ERR(rtc);
236 return err; 236 return err;
237 } 237 }
238 platform_set_drvdata(pdev, rtc);
238 239
239 return 0; 240 return 0;
240} 241}
@@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev)
244 struct rtc_device *rtc = platform_get_drvdata(pdev); 245 struct rtc_device *rtc = platform_get_drvdata(pdev);
245 246
246 rtc_device_unregister(rtc); 247 rtc_device_unregister(rtc);
248 platform_set_drvdata(pdev, NULL);
247 return 0; 249 return 0;
248} 250}
249 251
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 72b2bcc2c224..d4fb82d85e9b 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -426,7 +426,7 @@ static int bfin_rtc_suspend(struct platform_device *pdev, pm_message_t state)
426 enable_irq_wake(IRQ_RTC); 426 enable_irq_wake(IRQ_RTC);
427 bfin_rtc_sync_pending(&pdev->dev); 427 bfin_rtc_sync_pending(&pdev->dev);
428 } else 428 } else
429 bfin_rtc_int_clear(-1); 429 bfin_rtc_int_clear(0);
430 430
431 return 0; 431 return 0;
432} 432}
@@ -435,8 +435,17 @@ static int bfin_rtc_resume(struct platform_device *pdev)
435{ 435{
436 if (device_may_wakeup(&pdev->dev)) 436 if (device_may_wakeup(&pdev->dev))
437 disable_irq_wake(IRQ_RTC); 437 disable_irq_wake(IRQ_RTC);
438 else 438
439 bfin_write_RTC_ISTAT(-1); 439 /*
440 * Since only some of the RTC bits are maintained externally in the
441 * Vbat domain, we need to wait for the RTC MMRs to be synced into
442 * the core after waking up. This happens every RTC 1HZ. Once that
443 * has happened, we can go ahead and re-enable the important write
444 * complete interrupt event.
445 */
446 while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_SEC))
447 continue;
448 bfin_rtc_int_set(RTC_ISTAT_WRITE_COMPLETE);
440 449
441 return 0; 450 return 0;
442} 451}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 66377f3e28b8..d60557cae8ef 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -364,7 +364,7 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
364 t->time.tm_isdst = -1; 364 t->time.tm_isdst = -1;
365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE); 365 t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF); 366 t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
367 return rtc_valid_tm(t); 367 return 0;
368} 368}
369 369
370static struct rtc_class_ops m41t80_rtc_ops = { 370static struct rtc_class_ops m41t80_rtc_ops = {
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 6c418fe7f288..b7a6690e5b35 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -403,7 +403,7 @@ static int pl031_probe(struct amba_device *adev, struct amba_id *id)
403 } 403 }
404 404
405 if (request_irq(adev->irq[0], pl031_interrupt, 405 if (request_irq(adev->irq[0], pl031_interrupt,
406 IRQF_DISABLED | IRQF_SHARED, "rtc-pl031", ldata)) { 406 IRQF_DISABLED, "rtc-pl031", ldata)) {
407 ret = -EIO; 407 ret = -EIO;
408 goto out_no_irq; 408 goto out_no_irq;
409 } 409 }
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a0d3ec89d412..f57a87f4ae96 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
310 310
311 s3c_rtc_setaie(alrm->enabled); 311 s3c_rtc_setaie(alrm->enabled);
312 312
313 if (alrm->enabled)
314 enable_irq_wake(s3c_rtc_alarmno);
315 else
316 disable_irq_wake(s3c_rtc_alarmno);
317
318 return 0; 313 return 0;
319} 314}
320 315
@@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
587 ticnt_en_save &= S3C64XX_RTCCON_TICEN; 582 ticnt_en_save &= S3C64XX_RTCCON_TICEN;
588 } 583 }
589 s3c_rtc_enable(pdev, 0); 584 s3c_rtc_enable(pdev, 0);
585
586 if (device_may_wakeup(&pdev->dev))
587 enable_irq_wake(s3c_rtc_alarmno);
588
590 return 0; 589 return 0;
591} 590}
592 591
@@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
600 tmp = readb(s3c_rtc_base + S3C2410_RTCCON); 599 tmp = readb(s3c_rtc_base + S3C2410_RTCCON);
601 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); 600 writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
602 } 601 }
602
603 if (device_may_wakeup(&pdev->dev))
604 disable_irq_wake(s3c_rtc_alarmno);
605
603 return 0; 606 return 0;
604} 607}
605#else 608#else
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index b7de02525ec9..85cf607fc78f 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -217,8 +217,7 @@ tapeblock_setup_device(struct tape_device * device)
217 if (!blkdat->request_queue) 217 if (!blkdat->request_queue)
218 return -ENOMEM; 218 return -ENOMEM;
219 219
220 elevator_exit(blkdat->request_queue->elevator); 220 rc = elevator_change(blkdat->request_queue, "noop");
221 rc = elevator_init(blkdat->request_queue, "noop");
222 if (rc) 221 if (rc)
223 goto cleanup_queue; 222 goto cleanup_queue;
224 223
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6edf20b62de5..2c7d2d9be4d0 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1154 dev_fsm, dev_fsm_len, GFP_KERNEL);
1155 if (priv->fsm == NULL) { 1155 if (priv->fsm == NULL) {
1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1157 kfree(dev); 1157 free_netdev(dev);
1158 return NULL; 1158 return NULL;
1159 } 1159 }
1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1165 grp = ctcmpc_init_mpc_group(priv); 1165 grp = ctcmpc_init_mpc_group(priv);
1166 if (grp == NULL) { 1166 if (grp == NULL) {
1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1168 kfree(dev); 1168 free_netdev(dev);
1169 return NULL; 1169 return NULL;
1170 } 1170 }
1171 tasklet_init(&grp->mpc_tasklet2, 1171 tasklet_init(&grp->mpc_tasklet2,
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 2861e78773cb..b64881f33f23 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -540,7 +540,7 @@ void ctc_mpc_dealloc_ch(int port_num)
540 540
541 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, 541 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
542 "%s: %s: refcount = %d\n", 542 "%s: %s: refcount = %d\n",
543 CTCM_FUNTAIL, dev->name, atomic_read(&dev->refcnt)); 543 CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
544 544
545 fsm_deltimer(&priv->restart_timer); 545 fsm_deltimer(&priv->restart_timer);
546 grp->channels_terminating = 0; 546 grp->channels_terminating = 0;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 01c3c1f77879..847e8797073c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -310,6 +310,8 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
310 struct qeth_vlan_vid *id; 310 struct qeth_vlan_vid *id;
311 311
312 QETH_CARD_TEXT_(card, 4, "aid:%d", vid); 312 QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
313 if (!vid)
314 return;
313 if (card->info.type == QETH_CARD_TYPE_OSM) { 315 if (card->info.type == QETH_CARD_TYPE_OSM) {
314 QETH_CARD_TEXT(card, 3, "aidOSM"); 316 QETH_CARD_TEXT(card, 3, "aidOSM");
315 return; 317 return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5b79f573bd93..74d1401a5d5e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1820,7 +1820,7 @@ static void qeth_l3_add_vlan_mc(struct qeth_card *card)
1820 return; 1820 return;
1821 1821
1822 vg = card->vlangrp; 1822 vg = card->vlangrp;
1823 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 1823 for (i = 0; i < VLAN_N_VID; i++) {
1824 struct net_device *netdev = vlan_group_get_device(vg, i); 1824 struct net_device *netdev = vlan_group_get_device(vg, i);
1825 if (netdev == NULL || 1825 if (netdev == NULL ||
1826 !(netdev->flags & IFF_UP)) 1826 !(netdev->flags & IFF_UP))
@@ -1883,7 +1883,7 @@ static void qeth_l3_add_vlan_mc6(struct qeth_card *card)
1883 return; 1883 return;
1884 1884
1885 vg = card->vlangrp; 1885 vg = card->vlangrp;
1886 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 1886 for (i = 0; i < VLAN_N_VID; i++) {
1887 struct net_device *netdev = vlan_group_get_device(vg, i); 1887 struct net_device *netdev = vlan_group_get_device(vg, i);
1888 if (netdev == NULL || 1888 if (netdev == NULL ||
1889 !(netdev->flags & IFF_UP)) 1889 !(netdev->flags & IFF_UP))
@@ -2013,13 +2013,14 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2013 qeth_l3_set_multicast_list(card->dev); 2013 qeth_l3_set_multicast_list(card->dev);
2014} 2014}
2015 2015
2016static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card, 2016static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
2017 struct sk_buff *skb, struct qeth_hdr *hdr) 2017 struct sk_buff *skb, struct qeth_hdr *hdr,
2018 unsigned short *vlan_id)
2018{ 2019{
2019 unsigned short vlan_id = 0;
2020 __be16 prot; 2020 __be16 prot;
2021 struct iphdr *ip_hdr; 2021 struct iphdr *ip_hdr;
2022 unsigned char tg_addr[MAX_ADDR_LEN]; 2022 unsigned char tg_addr[MAX_ADDR_LEN];
2023 int is_vlan = 0;
2023 2024
2024 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { 2025 if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
2025 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : 2026 prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 :
@@ -2082,8 +2083,9 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
2082 2083
2083 if (hdr->hdr.l3.ext_flags & 2084 if (hdr->hdr.l3.ext_flags &
2084 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { 2085 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
2085 vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? 2086 *vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
2086 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); 2087 hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]);
2088 is_vlan = 1;
2087 } 2089 }
2088 2090
2089 switch (card->options.checksum_type) { 2091 switch (card->options.checksum_type) {
@@ -2104,7 +2106,7 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
2104 skb->ip_summed = CHECKSUM_NONE; 2106 skb->ip_summed = CHECKSUM_NONE;
2105 } 2107 }
2106 2108
2107 return vlan_id; 2109 return is_vlan;
2108} 2110}
2109 2111
2110static int qeth_l3_process_inbound_buffer(struct qeth_card *card, 2112static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
@@ -2114,6 +2116,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
2114 struct sk_buff *skb; 2116 struct sk_buff *skb;
2115 struct qeth_hdr *hdr; 2117 struct qeth_hdr *hdr;
2116 __u16 vlan_tag = 0; 2118 __u16 vlan_tag = 0;
2119 int is_vlan;
2117 unsigned int len; 2120 unsigned int len;
2118 2121
2119 *done = 0; 2122 *done = 0;
@@ -2129,16 +2132,12 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
2129 skb->dev = card->dev; 2132 skb->dev = card->dev;
2130 switch (hdr->hdr.l3.id) { 2133 switch (hdr->hdr.l3.id) {
2131 case QETH_HEADER_TYPE_LAYER3: 2134 case QETH_HEADER_TYPE_LAYER3:
2132 vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr); 2135 is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
2136 &vlan_tag);
2133 len = skb->len; 2137 len = skb->len;
2134 if (vlan_tag && !card->options.sniffer) 2138 if (is_vlan && !card->options.sniffer)
2135 if (card->vlangrp) 2139 vlan_gro_receive(&card->napi, card->vlangrp,
2136 vlan_gro_receive(&card->napi, 2140 vlan_tag, skb);
2137 card->vlangrp, vlan_tag, skb);
2138 else {
2139 dev_kfree_skb_any(skb);
2140 continue;
2141 }
2142 else 2141 else
2143 napi_gro_receive(&card->napi, skb); 2142 napi_gro_receive(&card->napi, skb);
2144 break; 2143 break;
@@ -2248,7 +2247,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2248 if (!vg) 2247 if (!vg)
2249 return rc; 2248 return rc;
2250 2249
2251 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { 2250 for (i = 0; i < VLAN_N_VID; i++) {
2252 if (vlan_group_get_device(vg, i) == dev) { 2251 if (vlan_group_get_device(vg, i) == dev) {
2253 rc = QETH_VLAN_CARD; 2252 rc = QETH_VLAN_CARD;
2254 break; 2253 break;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 7d4d2275573c..7f11f3e48e12 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -300,8 +300,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
300 enum iscsi_host_param param, char *buf) 300 enum iscsi_host_param param, char *buf)
301{ 301{
302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 302 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
303 int len = 0; 303 int status = 0;
304 int status;
305 304
306 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); 305 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
307 switch (param) { 306 switch (param) {
@@ -315,7 +314,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
315 default: 314 default:
316 return iscsi_host_get_param(shost, param, buf); 315 return iscsi_host_get_param(shost, param, buf);
317 } 316 }
318 return len; 317 return status;
319} 318}
320 319
321int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba) 320int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 26350e470bcc..877324fc594c 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -368,7 +368,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
368 memset(req, 0, sizeof(*req)); 368 memset(req, 0, sizeof(*req));
369 wrb->tag0 |= tag; 369 wrb->tag0 |= tag;
370 370
371 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 1); 371 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, 372 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, 373 OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
374 sizeof(*req)); 374 sizeof(*req));
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 2fceb19eb27b..1b6f86b2482d 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -120,6 +120,8 @@
120/* additional LOM specific iSCSI license not installed */ 120/* additional LOM specific iSCSI license not installed */
121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) 121#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51)
122 122
123#define ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY (0x80)
124
123/* SQ/RQ/CQ DB structure sizes */ 125/* SQ/RQ/CQ DB structure sizes */
124#define ISCSI_SQ_DB_SIZE (16) 126#define ISCSI_SQ_DB_SIZE (16)
125#define ISCSI_RQ_DB_SIZE (16) 127#define ISCSI_RQ_DB_SIZE (16)
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 00c033511cbf..99568cb9ad1c 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -58,6 +58,8 @@
58#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 58#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
59#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 59#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4
60 60
61#define BNX2I_5771X_DBELL_PAGE_SIZE 128
62
61/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ 63/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
62#define MAX_BD_LENGTH 65535 64#define MAX_BD_LENGTH 65535
63#define BD_SPLIT_SIZE 32768 65#define BD_SPLIT_SIZE 32768
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index d23fc256d585..99c71e6d4c14 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2405,7 +2405,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
2405 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { 2405 if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
2406 reg_base = pci_resource_start(ep->hba->pcidev, 2406 reg_base = pci_resource_start(ep->hba->pcidev,
2407 BNX2X_DOORBELL_PCI_BAR); 2407 BNX2X_DOORBELL_PCI_BAR);
2408 reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE; 2408 reg_off = BNX2I_5771X_DBELL_PAGE_SIZE * (cid_num & 0x1FFFF) +
2409 DPM_TRIGER_TYPE;
2409 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); 2410 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
2410 goto arm_cq; 2411 goto arm_cq;
2411 } 2412 }
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index cd05e049d5f6..d0c82340f0e2 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1404,13 +1404,13 @@ void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
1404{ 1404{
1405 struct scsi_sense_hdr sshdr; 1405 struct scsi_sense_hdr sshdr;
1406 1406
1407 scmd_printk(KERN_INFO, cmd, ""); 1407 scmd_printk(KERN_INFO, cmd, " ");
1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1408 scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1409 &sshdr); 1409 &sshdr);
1410 scsi_show_sense_hdr(&sshdr); 1410 scsi_show_sense_hdr(&sshdr);
1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, 1411 scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
1412 &sshdr); 1412 &sshdr);
1413 scmd_printk(KERN_INFO, cmd, ""); 1413 scmd_printk(KERN_INFO, cmd, " ");
1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq); 1414 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
1415} 1415}
1416EXPORT_SYMBOL(scsi_print_sense); 1416EXPORT_SYMBOL(scsi_print_sense);
@@ -1453,7 +1453,7 @@ EXPORT_SYMBOL(scsi_show_result);
1453 1453
1454void scsi_print_result(struct scsi_cmnd *cmd) 1454void scsi_print_result(struct scsi_cmnd *cmd)
1455{ 1455{
1456 scmd_printk(KERN_INFO, cmd, ""); 1456 scmd_printk(KERN_INFO, cmd, " ");
1457 scsi_show_result(cmd->result); 1457 scsi_show_result(cmd->result);
1458} 1458}
1459EXPORT_SYMBOL(scsi_print_result); 1459EXPORT_SYMBOL(scsi_print_result);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f5551b5fe53..c5d0606ad097 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3231,6 +3231,12 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3231 misc_fw_support = readl(&cfgtable->misc_fw_support); 3231 misc_fw_support = readl(&cfgtable->misc_fw_support);
3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 3232 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3233 3233
3234 /* The doorbell reset seems to cause lockups on some Smart
3235 * Arrays (e.g. P410, P410i, maybe others). Until this is
3236 * fixed or at least isolated, avoid the doorbell reset.
3237 */
3238 use_doorbell = 0;
3239
3234 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); 3240 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3235 if (rc) 3241 if (rc)
3236 goto unmap_cfgtable; 3242 goto unmap_cfgtable;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fda4de3440c4..e88bbdde49c5 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
865{ 865{
866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); 866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
867 WARN_ON(or->in.bio || or->in.total_bytes); 867 WARN_ON(or->in.bio || or->in.total_bytes);
868 WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); 868 WARN_ON(bio->bi_rw & REQ_WRITE);
869 or->in.bio = bio; 869 or->in.bio = bio;
870 or->in.total_bytes = len; 870 or->in.total_bytes = len;
871} 871}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 420238cc794e..114bc5a81171 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1838,26 +1838,33 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1838 1838
1839 qla24xx_disable_vp(vha); 1839 qla24xx_disable_vp(vha);
1840 1840
1841 vha->flags.delete_progress = 1;
1842
1841 fc_remove_host(vha->host); 1843 fc_remove_host(vha->host);
1842 1844
1843 scsi_remove_host(vha->host); 1845 scsi_remove_host(vha->host);
1844 1846
1845 qla2x00_free_fcports(vha); 1847 if (vha->timer_active) {
1848 qla2x00_vp_stop_timer(vha);
1849 DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
1850 " = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
1851 }
1846 1852
1847 qla24xx_deallocate_vp_id(vha); 1853 qla24xx_deallocate_vp_id(vha);
1848 1854
1855 /* No pending activities shall be there on the vha now */
1856 DEBUG(msleep(random32()%10)); /* Just to see if something falls on
1857 * the net we have placed below */
1858
1859 BUG_ON(atomic_read(&vha->vref_count));
1860
1861 qla2x00_free_fcports(vha);
1862
1849 mutex_lock(&ha->vport_lock); 1863 mutex_lock(&ha->vport_lock);
1850 ha->cur_vport_count--; 1864 ha->cur_vport_count--;
1851 clear_bit(vha->vp_idx, ha->vp_idx_map); 1865 clear_bit(vha->vp_idx, ha->vp_idx_map);
1852 mutex_unlock(&ha->vport_lock); 1866 mutex_unlock(&ha->vport_lock);
1853 1867
1854 if (vha->timer_active) {
1855 qla2x00_vp_stop_timer(vha);
1856 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1857 "has stopped\n",
1858 vha->host_no, vha->vp_idx, vha));
1859 }
1860
1861 if (vha->req->id && !ha->flags.cpu_affinity_enabled) { 1868 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1862 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) 1869 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1863 qla_printk(KERN_WARNING, ha, 1870 qla_printk(KERN_WARNING, ha,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 6cfc28a25eb3..b74e6b5743dc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -29,8 +29,6 @@
29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */ 29/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */ 30/* #define QL_DEBUG_LEVEL_18 */ /* Output T10 CRC trace messages */
31 31
32/* #define QL_PRINTK_BUF */ /* Captures printk to buffer */
33
34/* 32/*
35* Macros use for debugging the driver. 33* Macros use for debugging the driver.
36*/ 34*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 3a432ea0c7a3..d2a4e1530708 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2641,6 +2641,7 @@ struct qla_hw_data {
2641#define MBX_UPDATE_FLASH_ACTIVE 3 2641#define MBX_UPDATE_FLASH_ACTIVE 3
2642 2642
2643 struct mutex vport_lock; /* Virtual port synchronization */ 2643 struct mutex vport_lock; /* Virtual port synchronization */
2644 spinlock_t vport_slock; /* order is hardware_lock, then vport_slock */
2644 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2645 struct completion mbx_cmd_comp; /* Serialize mbx access */
2645 struct completion mbx_intr_comp; /* Used for completion notification */ 2646 struct completion mbx_intr_comp; /* Used for completion notification */
2646 struct completion dcbx_comp; /* For set port config notification */ 2647 struct completion dcbx_comp; /* For set port config notification */
@@ -2828,6 +2829,7 @@ typedef struct scsi_qla_host {
2828 uint32_t management_server_logged_in :1; 2829 uint32_t management_server_logged_in :1;
2829 uint32_t process_response_queue :1; 2830 uint32_t process_response_queue :1;
2830 uint32_t difdix_supported:1; 2831 uint32_t difdix_supported:1;
2832 uint32_t delete_progress:1;
2831 } flags; 2833 } flags;
2832 2834
2833 atomic_t loop_state; 2835 atomic_t loop_state;
@@ -2922,6 +2924,8 @@ typedef struct scsi_qla_host {
2922 struct req_que *req; 2924 struct req_que *req;
2923 int fw_heartbeat_counter; 2925 int fw_heartbeat_counter;
2924 int seconds_since_last_heartbeat; 2926 int seconds_since_last_heartbeat;
2927
2928 atomic_t vref_count;
2925} scsi_qla_host_t; 2929} scsi_qla_host_t;
2926 2930
2927/* 2931/*
@@ -2932,6 +2936,22 @@ typedef struct scsi_qla_host {
2932 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \ 2936 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
2933 atomic_read(&ha->loop_state) == LOOP_DOWN) 2937 atomic_read(&ha->loop_state) == LOOP_DOWN)
2934 2938
2939#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \
2940 atomic_inc(&__vha->vref_count); \
2941 mb(); \
2942 if (__vha->flags.delete_progress) { \
2943 atomic_dec(&__vha->vref_count); \
2944 __bail = 1; \
2945 } else { \
2946 __bail = 0; \
2947 } \
2948} while (0)
2949
2950#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
2951 atomic_dec(&__vha->vref_count); \
2952} while (0)
2953
2954
2935#define qla_printk(level, ha, format, arg...) \ 2955#define qla_printk(level, ha, format, arg...) \
2936 dev_printk(level , &((ha)->pdev->dev) , format , ## arg) 2956 dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
2937 2957
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d863ed2619b5..9c383baebe27 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -69,21 +69,29 @@ qla2x00_ctx_sp_free(srb_t *sp)
69{ 69{
70 struct srb_ctx *ctx = sp->ctx; 70 struct srb_ctx *ctx = sp->ctx;
71 struct srb_iocb *iocb = ctx->u.iocb_cmd; 71 struct srb_iocb *iocb = ctx->u.iocb_cmd;
72 struct scsi_qla_host *vha = sp->fcport->vha;
72 73
73 del_timer_sync(&iocb->timer); 74 del_timer_sync(&iocb->timer);
74 kfree(iocb); 75 kfree(iocb);
75 kfree(ctx); 76 kfree(ctx);
76 mempool_free(sp, sp->fcport->vha->hw->srb_mempool); 77 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
78
79 QLA_VHA_MARK_NOT_BUSY(vha);
77} 80}
78 81
79inline srb_t * 82inline srb_t *
80qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size, 83qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
81 unsigned long tmo) 84 unsigned long tmo)
82{ 85{
83 srb_t *sp; 86 srb_t *sp = NULL;
84 struct qla_hw_data *ha = vha->hw; 87 struct qla_hw_data *ha = vha->hw;
85 struct srb_ctx *ctx; 88 struct srb_ctx *ctx;
86 struct srb_iocb *iocb; 89 struct srb_iocb *iocb;
90 uint8_t bail;
91
92 QLA_VHA_MARK_BUSY(vha, bail);
93 if (bail)
94 return NULL;
87 95
88 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); 96 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
89 if (!sp) 97 if (!sp)
@@ -116,6 +124,8 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
116 iocb->timer.function = qla2x00_ctx_sp_timeout; 124 iocb->timer.function = qla2x00_ctx_sp_timeout;
117 add_timer(&iocb->timer); 125 add_timer(&iocb->timer);
118done: 126done:
127 if (!sp)
128 QLA_VHA_MARK_NOT_BUSY(vha);
119 return sp; 129 return sp;
120} 130}
121 131
@@ -1777,11 +1787,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1777 qla2x00_init_response_q_entries(rsp); 1787 qla2x00_init_response_q_entries(rsp);
1778 } 1788 }
1779 1789
1790 spin_lock_irqsave(&ha->vport_slock, flags);
1780 /* Clear RSCN queue. */ 1791 /* Clear RSCN queue. */
1781 list_for_each_entry(vp, &ha->vp_list, list) { 1792 list_for_each_entry(vp, &ha->vp_list, list) {
1782 vp->rscn_in_ptr = 0; 1793 vp->rscn_in_ptr = 0;
1783 vp->rscn_out_ptr = 0; 1794 vp->rscn_out_ptr = 0;
1784 } 1795 }
1796
1797 spin_unlock_irqrestore(&ha->vport_slock, flags);
1798
1785 ha->isp_ops->config_rings(vha); 1799 ha->isp_ops->config_rings(vha);
1786 1800
1787 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1801 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3218,12 +3232,17 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3218 /* Bypass virtual ports of the same host. */ 3232 /* Bypass virtual ports of the same host. */
3219 found = 0; 3233 found = 0;
3220 if (ha->num_vhosts) { 3234 if (ha->num_vhosts) {
3235 unsigned long flags;
3236
3237 spin_lock_irqsave(&ha->vport_slock, flags);
3221 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3238 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3222 if (new_fcport->d_id.b24 == vp->d_id.b24) { 3239 if (new_fcport->d_id.b24 == vp->d_id.b24) {
3223 found = 1; 3240 found = 1;
3224 break; 3241 break;
3225 } 3242 }
3226 } 3243 }
3244 spin_unlock_irqrestore(&ha->vport_slock, flags);
3245
3227 if (found) 3246 if (found)
3228 continue; 3247 continue;
3229 } 3248 }
@@ -3343,6 +3362,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3343 struct qla_hw_data *ha = vha->hw; 3362 struct qla_hw_data *ha = vha->hw;
3344 struct scsi_qla_host *vp; 3363 struct scsi_qla_host *vp;
3345 struct scsi_qla_host *tvp; 3364 struct scsi_qla_host *tvp;
3365 unsigned long flags = 0;
3346 3366
3347 rval = QLA_SUCCESS; 3367 rval = QLA_SUCCESS;
3348 3368
@@ -3367,6 +3387,8 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3367 /* Check for loop ID being already in use. */ 3387 /* Check for loop ID being already in use. */
3368 found = 0; 3388 found = 0;
3369 fcport = NULL; 3389 fcport = NULL;
3390
3391 spin_lock_irqsave(&ha->vport_slock, flags);
3370 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 3392 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
3371 list_for_each_entry(fcport, &vp->vp_fcports, list) { 3393 list_for_each_entry(fcport, &vp->vp_fcports, list) {
3372 if (fcport->loop_id == dev->loop_id && 3394 if (fcport->loop_id == dev->loop_id &&
@@ -3379,6 +3401,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3379 if (found) 3401 if (found)
3380 break; 3402 break;
3381 } 3403 }
3404 spin_unlock_irqrestore(&ha->vport_slock, flags);
3382 3405
3383 /* If not in use then it is free to use. */ 3406 /* If not in use then it is free to use. */
3384 if (!found) { 3407 if (!found) {
@@ -3791,14 +3814,27 @@ void
3791qla2x00_update_fcports(scsi_qla_host_t *base_vha) 3814qla2x00_update_fcports(scsi_qla_host_t *base_vha)
3792{ 3815{
3793 fc_port_t *fcport; 3816 fc_port_t *fcport;
3794 struct scsi_qla_host *tvp, *vha; 3817 struct scsi_qla_host *vha;
3818 struct qla_hw_data *ha = base_vha->hw;
3819 unsigned long flags;
3795 3820
3821 spin_lock_irqsave(&ha->vport_slock, flags);
3796 /* Go with deferred removal of rport references. */ 3822 /* Go with deferred removal of rport references. */
3797 list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) 3823 list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
3798 list_for_each_entry(fcport, &vha->vp_fcports, list) 3824 atomic_inc(&vha->vref_count);
3825 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3799 if (fcport && fcport->drport && 3826 if (fcport && fcport->drport &&
3800 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3827 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
3828 spin_unlock_irqrestore(&ha->vport_slock, flags);
3829
3801 qla2x00_rport_del(fcport); 3830 qla2x00_rport_del(fcport);
3831
3832 spin_lock_irqsave(&ha->vport_slock, flags);
3833 }
3834 }
3835 atomic_dec(&vha->vref_count);
3836 }
3837 spin_unlock_irqrestore(&ha->vport_slock, flags);
3802} 3838}
3803 3839
3804void 3840void
@@ -3806,7 +3842,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3806{ 3842{
3807 struct qla_hw_data *ha = vha->hw; 3843 struct qla_hw_data *ha = vha->hw;
3808 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); 3844 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
3809 struct scsi_qla_host *tvp; 3845 unsigned long flags;
3810 3846
3811 vha->flags.online = 0; 3847 vha->flags.online = 0;
3812 ha->flags.chip_reset_done = 0; 3848 ha->flags.chip_reset_done = 0;
@@ -3824,8 +3860,18 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3824 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 3860 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3825 atomic_set(&vha->loop_state, LOOP_DOWN); 3861 atomic_set(&vha->loop_state, LOOP_DOWN);
3826 qla2x00_mark_all_devices_lost(vha, 0); 3862 qla2x00_mark_all_devices_lost(vha, 0);
3827 list_for_each_entry_safe(vp, tvp, &base_vha->hw->vp_list, list) 3863
3864 spin_lock_irqsave(&ha->vport_slock, flags);
3865 list_for_each_entry(vp, &base_vha->hw->vp_list, list) {
3866 atomic_inc(&vp->vref_count);
3867 spin_unlock_irqrestore(&ha->vport_slock, flags);
3868
3828 qla2x00_mark_all_devices_lost(vp, 0); 3869 qla2x00_mark_all_devices_lost(vp, 0);
3870
3871 spin_lock_irqsave(&ha->vport_slock, flags);
3872 atomic_dec(&vp->vref_count);
3873 }
3874 spin_unlock_irqrestore(&ha->vport_slock, flags);
3829 } else { 3875 } else {
3830 if (!atomic_read(&vha->loop_down_timer)) 3876 if (!atomic_read(&vha->loop_down_timer))
3831 atomic_set(&vha->loop_down_timer, 3877 atomic_set(&vha->loop_down_timer,
@@ -3862,8 +3908,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3862 uint8_t status = 0; 3908 uint8_t status = 0;
3863 struct qla_hw_data *ha = vha->hw; 3909 struct qla_hw_data *ha = vha->hw;
3864 struct scsi_qla_host *vp; 3910 struct scsi_qla_host *vp;
3865 struct scsi_qla_host *tvp;
3866 struct req_que *req = ha->req_q_map[0]; 3911 struct req_que *req = ha->req_q_map[0];
3912 unsigned long flags;
3867 3913
3868 if (vha->flags.online) { 3914 if (vha->flags.online) {
3869 qla2x00_abort_isp_cleanup(vha); 3915 qla2x00_abort_isp_cleanup(vha);
@@ -3970,10 +4016,21 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
3970 DEBUG(printk(KERN_INFO 4016 DEBUG(printk(KERN_INFO
3971 "qla2x00_abort_isp(%ld): succeeded.\n", 4017 "qla2x00_abort_isp(%ld): succeeded.\n",
3972 vha->host_no)); 4018 vha->host_no));
3973 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 4019
3974 if (vp->vp_idx) 4020 spin_lock_irqsave(&ha->vport_slock, flags);
4021 list_for_each_entry(vp, &ha->vp_list, list) {
4022 if (vp->vp_idx) {
4023 atomic_inc(&vp->vref_count);
4024 spin_unlock_irqrestore(&ha->vport_slock, flags);
4025
3975 qla2x00_vp_abort_isp(vp); 4026 qla2x00_vp_abort_isp(vp);
4027
4028 spin_lock_irqsave(&ha->vport_slock, flags);
4029 atomic_dec(&vp->vref_count);
4030 }
3976 } 4031 }
4032 spin_unlock_irqrestore(&ha->vport_slock, flags);
4033
3977 } else { 4034 } else {
3978 qla_printk(KERN_INFO, ha, 4035 qla_printk(KERN_INFO, ha,
3979 "qla2x00_abort_isp: **** FAILED ****\n"); 4036 "qla2x00_abort_isp: **** FAILED ****\n");
@@ -5185,7 +5242,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5185 struct req_que *req = ha->req_q_map[0]; 5242 struct req_que *req = ha->req_q_map[0];
5186 struct rsp_que *rsp = ha->rsp_q_map[0]; 5243 struct rsp_que *rsp = ha->rsp_q_map[0];
5187 struct scsi_qla_host *vp; 5244 struct scsi_qla_host *vp;
5188 struct scsi_qla_host *tvp; 5245 unsigned long flags;
5189 5246
5190 status = qla2x00_init_rings(vha); 5247 status = qla2x00_init_rings(vha);
5191 if (!status) { 5248 if (!status) {
@@ -5272,10 +5329,21 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5272 DEBUG(printk(KERN_INFO 5329 DEBUG(printk(KERN_INFO
5273 "qla82xx_restart_isp(%ld): succeeded.\n", 5330 "qla82xx_restart_isp(%ld): succeeded.\n",
5274 vha->host_no)); 5331 vha->host_no));
5275 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 5332
5276 if (vp->vp_idx) 5333 spin_lock_irqsave(&ha->vport_slock, flags);
5334 list_for_each_entry(vp, &ha->vp_list, list) {
5335 if (vp->vp_idx) {
5336 atomic_inc(&vp->vref_count);
5337 spin_unlock_irqrestore(&ha->vport_slock, flags);
5338
5277 qla2x00_vp_abort_isp(vp); 5339 qla2x00_vp_abort_isp(vp);
5340
5341 spin_lock_irqsave(&ha->vport_slock, flags);
5342 atomic_dec(&vp->vref_count);
5343 }
5278 } 5344 }
5345 spin_unlock_irqrestore(&ha->vport_slock, flags);
5346
5279 } else { 5347 } else {
5280 qla_printk(KERN_INFO, ha, 5348 qla_printk(KERN_INFO, ha,
5281 "qla82xx_restart_isp: **** FAILED ****\n"); 5349 "qla82xx_restart_isp: **** FAILED ****\n");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 6982ba70e12a..28f65be19dad 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1706,19 +1706,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1706 cp->result = DID_ERROR << 16; 1706 cp->result = DID_ERROR << 16;
1707 break; 1707 break;
1708 } 1708 }
1709 } else if (!lscsi_status) { 1709 } else {
1710 DEBUG2(qla_printk(KERN_INFO, ha, 1710 DEBUG2(qla_printk(KERN_INFO, ha,
1711 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x " 1711 "scsi(%ld:%d:%d) Dropped frame(s) detected (0x%x "
1712 "of 0x%x bytes).\n", vha->host_no, cp->device->id, 1712 "of 0x%x bytes).\n", vha->host_no, cp->device->id,
1713 cp->device->lun, resid, scsi_bufflen(cp))); 1713 cp->device->lun, resid, scsi_bufflen(cp)));
1714 1714
1715 cp->result = DID_ERROR << 16; 1715 cp->result = DID_ERROR << 16 | lscsi_status;
1716 break; 1716 goto check_scsi_status;
1717 } 1717 }
1718 1718
1719 cp->result = DID_OK << 16 | lscsi_status; 1719 cp->result = DID_OK << 16 | lscsi_status;
1720 logit = 0; 1720 logit = 0;
1721 1721
1722check_scsi_status:
1722 /* 1723 /*
1723 * Check to see if SCSI Status is non zero. If so report SCSI 1724 * Check to see if SCSI Status is non zero. If so report SCSI
1724 * Status. 1725 * Status.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 6009b0c69488..a595ec8264f8 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2913,7 +2913,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2913 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); 2913 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2914 struct qla_hw_data *ha = vha->hw; 2914 struct qla_hw_data *ha = vha->hw;
2915 scsi_qla_host_t *vp; 2915 scsi_qla_host_t *vp;
2916 scsi_qla_host_t *tvp; 2916 unsigned long flags;
2917 2917
2918 if (rptid_entry->entry_status != 0) 2918 if (rptid_entry->entry_status != 0)
2919 return; 2919 return;
@@ -2945,9 +2945,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2945 return; 2945 return;
2946 } 2946 }
2947 2947
2948 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) 2948 spin_lock_irqsave(&ha->vport_slock, flags);
2949 list_for_each_entry(vp, &ha->vp_list, list)
2949 if (vp_idx == vp->vp_idx) 2950 if (vp_idx == vp->vp_idx)
2950 break; 2951 break;
2952 spin_unlock_irqrestore(&ha->vport_slock, flags);
2953
2951 if (!vp) 2954 if (!vp)
2952 return; 2955 return;
2953 2956
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 987c5b0ca78e..2b69392a71a1 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -30,6 +30,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
30{ 30{
31 uint32_t vp_id; 31 uint32_t vp_id;
32 struct qla_hw_data *ha = vha->hw; 32 struct qla_hw_data *ha = vha->hw;
33 unsigned long flags;
33 34
34 /* Find an empty slot and assign an vp_id */ 35 /* Find an empty slot and assign an vp_id */
35 mutex_lock(&ha->vport_lock); 36 mutex_lock(&ha->vport_lock);
@@ -44,7 +45,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44 set_bit(vp_id, ha->vp_idx_map); 45 set_bit(vp_id, ha->vp_idx_map);
45 ha->num_vhosts++; 46 ha->num_vhosts++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
48
49 spin_lock_irqsave(&ha->vport_slock, flags);
47 list_add_tail(&vha->list, &ha->vp_list); 50 list_add_tail(&vha->list, &ha->vp_list);
51 spin_unlock_irqrestore(&ha->vport_slock, flags);
52
48 mutex_unlock(&ha->vport_lock); 53 mutex_unlock(&ha->vport_lock);
49 return vp_id; 54 return vp_id;
50} 55}
@@ -54,12 +59,31 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
54{ 59{
55 uint16_t vp_id; 60 uint16_t vp_id;
56 struct qla_hw_data *ha = vha->hw; 61 struct qla_hw_data *ha = vha->hw;
62 unsigned long flags = 0;
57 63
58 mutex_lock(&ha->vport_lock); 64 mutex_lock(&ha->vport_lock);
65 /*
66 * Wait for all pending activities to finish before removing vport from
67 * the list.
68 * Lock needs to be held for safe removal from the list (it
69 * ensures no active vp_list traversal while the vport is removed
70 * from the queue)
71 */
72 spin_lock_irqsave(&ha->vport_slock, flags);
73 while (atomic_read(&vha->vref_count)) {
74 spin_unlock_irqrestore(&ha->vport_slock, flags);
75
76 msleep(500);
77
78 spin_lock_irqsave(&ha->vport_slock, flags);
79 }
80 list_del(&vha->list);
81 spin_unlock_irqrestore(&ha->vport_slock, flags);
82
59 vp_id = vha->vp_idx; 83 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 84 ha->num_vhosts--;
61 clear_bit(vp_id, ha->vp_idx_map); 85 clear_bit(vp_id, ha->vp_idx_map);
62 list_del(&vha->list); 86
63 mutex_unlock(&ha->vport_lock); 87 mutex_unlock(&ha->vport_lock);
64} 88}
65 89
@@ -68,12 +92,17 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
68{ 92{
69 scsi_qla_host_t *vha; 93 scsi_qla_host_t *vha;
70 struct scsi_qla_host *tvha; 94 struct scsi_qla_host *tvha;
95 unsigned long flags;
71 96
97 spin_lock_irqsave(&ha->vport_slock, flags);
72 /* Locate matching device in database. */ 98 /* Locate matching device in database. */
73 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 99 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
74 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) 100 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
101 spin_unlock_irqrestore(&ha->vport_slock, flags);
75 return vha; 102 return vha;
103 }
76 } 104 }
105 spin_unlock_irqrestore(&ha->vport_slock, flags);
77 return NULL; 106 return NULL;
78} 107}
79 108
@@ -93,6 +122,12 @@ qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
93static void 122static void
94qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 123qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
95{ 124{
125 /*
126 * !!! NOTE !!!
127 * This function, if called in contexts other than vp create, disable
128 * or delete, please make sure this is synchronized with the
129 * delete thread.
130 */
96 fc_port_t *fcport; 131 fc_port_t *fcport;
97 132
98 list_for_each_entry(fcport, &vha->vp_fcports, list) { 133 list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -100,7 +135,6 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
100 "loop_id=0x%04x :%x\n", 135 "loop_id=0x%04x :%x\n",
101 vha->host_no, fcport->loop_id, fcport->vp_idx)); 136 vha->host_no, fcport->loop_id, fcport->vp_idx));
102 137
103 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
104 qla2x00_mark_device_lost(vha, fcport, 0, 0); 138 qla2x00_mark_device_lost(vha, fcport, 0, 0);
105 atomic_set(&fcport->state, FCS_UNCONFIGURED); 139 atomic_set(&fcport->state, FCS_UNCONFIGURED);
106 } 140 }
@@ -194,12 +228,17 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
194void 228void
195qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) 229qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
196{ 230{
197 scsi_qla_host_t *vha, *tvha; 231 scsi_qla_host_t *vha;
198 struct qla_hw_data *ha = rsp->hw; 232 struct qla_hw_data *ha = rsp->hw;
199 int i = 0; 233 int i = 0;
234 unsigned long flags;
200 235
201 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 236 spin_lock_irqsave(&ha->vport_slock, flags);
237 list_for_each_entry(vha, &ha->vp_list, list) {
202 if (vha->vp_idx) { 238 if (vha->vp_idx) {
239 atomic_inc(&vha->vref_count);
240 spin_unlock_irqrestore(&ha->vport_slock, flags);
241
203 switch (mb[0]) { 242 switch (mb[0]) {
204 case MBA_LIP_OCCURRED: 243 case MBA_LIP_OCCURRED:
205 case MBA_LOOP_UP: 244 case MBA_LOOP_UP:
@@ -215,9 +254,13 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
215 qla2x00_async_event(vha, rsp, mb); 254 qla2x00_async_event(vha, rsp, mb);
216 break; 255 break;
217 } 256 }
257
258 spin_lock_irqsave(&ha->vport_slock, flags);
259 atomic_dec(&vha->vref_count);
218 } 260 }
219 i++; 261 i++;
220 } 262 }
263 spin_unlock_irqrestore(&ha->vport_slock, flags);
221} 264}
222 265
223int 266int
@@ -297,7 +340,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
297 int ret; 340 int ret;
298 struct qla_hw_data *ha = vha->hw; 341 struct qla_hw_data *ha = vha->hw;
299 scsi_qla_host_t *vp; 342 scsi_qla_host_t *vp;
300 struct scsi_qla_host *tvp; 343 unsigned long flags = 0;
301 344
302 if (vha->vp_idx) 345 if (vha->vp_idx)
303 return; 346 return;
@@ -309,10 +352,19 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
309 if (!(ha->current_topology & ISP_CFG_F)) 352 if (!(ha->current_topology & ISP_CFG_F))
310 return; 353 return;
311 354
312 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 355 spin_lock_irqsave(&ha->vport_slock, flags);
313 if (vp->vp_idx) 356 list_for_each_entry(vp, &ha->vp_list, list) {
357 if (vp->vp_idx) {
358 atomic_inc(&vp->vref_count);
359 spin_unlock_irqrestore(&ha->vport_slock, flags);
360
314 ret = qla2x00_do_dpc_vp(vp); 361 ret = qla2x00_do_dpc_vp(vp);
362
363 spin_lock_irqsave(&ha->vport_slock, flags);
364 atomic_dec(&vp->vref_count);
365 }
315 } 366 }
367 spin_unlock_irqrestore(&ha->vport_slock, flags);
316} 368}
317 369
318int 370int
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 915b77a6e193..0a71cc71eab2 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2672,6 +2672,19 @@ qla82xx_start_scsi(srb_t *sp)
2672sufficient_dsds: 2672sufficient_dsds:
2673 req_cnt = 1; 2673 req_cnt = 1;
2674 2674
2675 if (req->cnt < (req_cnt + 2)) {
2676 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2677 &reg->req_q_out[0]);
2678 if (req->ring_index < cnt)
2679 req->cnt = cnt - req->ring_index;
2680 else
2681 req->cnt = req->length -
2682 (req->ring_index - cnt);
2683 }
2684
2685 if (req->cnt < (req_cnt + 2))
2686 goto queuing_error;
2687
2675 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2688 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2676 if (!sp->ctx) { 2689 if (!sp->ctx) {
2677 DEBUG(printk(KERN_INFO 2690 DEBUG(printk(KERN_INFO
@@ -3307,16 +3320,19 @@ qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3307 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3320 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3308 } 3321 }
3309 qla2xxx_wake_dpc(vha); 3322 qla2xxx_wake_dpc(vha);
3323 ha->flags.fw_hung = 1;
3310 if (ha->flags.mbox_busy) { 3324 if (ha->flags.mbox_busy) {
3311 ha->flags.fw_hung = 1;
3312 ha->flags.mbox_int = 1; 3325 ha->flags.mbox_int = 1;
3313 DEBUG2(qla_printk(KERN_ERR, ha, 3326 DEBUG2(qla_printk(KERN_ERR, ha,
3314 "Due to fw hung, doing premature " 3327 "Due to fw hung, doing premature "
3315 "completion of mbx command\n")); 3328 "completion of mbx command\n"));
3316 complete(&ha->mbx_intr_comp); 3329 if (test_bit(MBX_INTR_WAIT,
3330 &ha->mbx_cmd_flags))
3331 complete(&ha->mbx_intr_comp);
3317 } 3332 }
3318 } 3333 }
3319 } 3334 } else
3335 vha->seconds_since_last_heartbeat = 0;
3320 vha->fw_heartbeat_counter = fw_heartbeat_counter; 3336 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3321} 3337}
3322 3338
@@ -3418,13 +3434,15 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3418 "%s(): Adapter reset needed!\n", __func__); 3434 "%s(): Adapter reset needed!\n", __func__);
3419 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3435 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3420 qla2xxx_wake_dpc(vha); 3436 qla2xxx_wake_dpc(vha);
3437 ha->flags.fw_hung = 1;
3421 if (ha->flags.mbox_busy) { 3438 if (ha->flags.mbox_busy) {
3422 ha->flags.fw_hung = 1;
3423 ha->flags.mbox_int = 1; 3439 ha->flags.mbox_int = 1;
3424 DEBUG2(qla_printk(KERN_ERR, ha, 3440 DEBUG2(qla_printk(KERN_ERR, ha,
3425 "Need reset, doing premature " 3441 "Need reset, doing premature "
3426 "completion of mbx command\n")); 3442 "completion of mbx command\n"));
3427 complete(&ha->mbx_intr_comp); 3443 if (test_bit(MBX_INTR_WAIT,
3444 &ha->mbx_cmd_flags))
3445 complete(&ha->mbx_intr_comp);
3428 } 3446 }
3429 } else { 3447 } else {
3430 qla82xx_check_fw_alive(vha); 3448 qla82xx_check_fw_alive(vha);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8c80b49ac1c4..1e4bff695254 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2341,16 +2341,28 @@ probe_out:
2341static void 2341static void
2342qla2x00_remove_one(struct pci_dev *pdev) 2342qla2x00_remove_one(struct pci_dev *pdev)
2343{ 2343{
2344 scsi_qla_host_t *base_vha, *vha, *temp; 2344 scsi_qla_host_t *base_vha, *vha;
2345 struct qla_hw_data *ha; 2345 struct qla_hw_data *ha;
2346 unsigned long flags;
2346 2347
2347 base_vha = pci_get_drvdata(pdev); 2348 base_vha = pci_get_drvdata(pdev);
2348 ha = base_vha->hw; 2349 ha = base_vha->hw;
2349 2350
2350 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) { 2351 spin_lock_irqsave(&ha->vport_slock, flags);
2351 if (vha && vha->fc_vport) 2352 list_for_each_entry(vha, &ha->vp_list, list) {
2353 atomic_inc(&vha->vref_count);
2354
2355 if (vha && vha->fc_vport) {
2356 spin_unlock_irqrestore(&ha->vport_slock, flags);
2357
2352 fc_vport_terminate(vha->fc_vport); 2358 fc_vport_terminate(vha->fc_vport);
2359
2360 spin_lock_irqsave(&ha->vport_slock, flags);
2361 }
2362
2363 atomic_dec(&vha->vref_count);
2353 } 2364 }
2365 spin_unlock_irqrestore(&ha->vport_slock, flags);
2354 2366
2355 set_bit(UNLOADING, &base_vha->dpc_flags); 2367 set_bit(UNLOADING, &base_vha->dpc_flags);
2356 2368
@@ -2975,10 +2987,17 @@ static struct qla_work_evt *
2975qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) 2987qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
2976{ 2988{
2977 struct qla_work_evt *e; 2989 struct qla_work_evt *e;
2990 uint8_t bail;
2991
2992 QLA_VHA_MARK_BUSY(vha, bail);
2993 if (bail)
2994 return NULL;
2978 2995
2979 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); 2996 e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
2980 if (!e) 2997 if (!e) {
2998 QLA_VHA_MARK_NOT_BUSY(vha);
2981 return NULL; 2999 return NULL;
3000 }
2982 3001
2983 INIT_LIST_HEAD(&e->list); 3002 INIT_LIST_HEAD(&e->list);
2984 e->type = type; 3003 e->type = type;
@@ -3135,6 +3154,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
3135 } 3154 }
3136 if (e->flags & QLA_EVT_FLAG_FREE) 3155 if (e->flags & QLA_EVT_FLAG_FREE)
3137 kfree(e); 3156 kfree(e);
3157
3158 /* For each work completed decrement vha ref count */
3159 QLA_VHA_MARK_NOT_BUSY(vha);
3138 } 3160 }
3139} 3161}
3140 3162
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e75ccb91317d..8edbccb3232d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.03.03-k0" 10#define QLA2XXX_VERSION "8.03.04-k0"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 3 13#define QLA_DRIVER_MINOR_VER 3
14#define QLA_DRIVER_PATCH_VER 3 14#define QLA_DRIVER_PATCH_VER 4
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ade720422c6..ee02d3838a0a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1011,8 +1011,8 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1011 1011
1012err_exit: 1012err_exit:
1013 scsi_release_buffers(cmd); 1013 scsi_release_buffers(cmd);
1014 scsi_put_command(cmd);
1015 cmd->request->special = NULL; 1014 cmd->request->special = NULL;
1015 scsi_put_command(cmd);
1016 return error; 1016 return error;
1017} 1017}
1018EXPORT_SYMBOL(scsi_init_io); 1018EXPORT_SYMBOL(scsi_init_io);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2714becc2eaf..ffa0689ee840 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -870,7 +870,7 @@ static int sd_release(struct gendisk *disk, fmode_t mode)
870 870
871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); 871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
872 872
873 if (atomic_dec_return(&sdkp->openers) && sdev->removable) { 873 if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
874 if (scsi_block_when_processing_errors(sdev)) 874 if (scsi_block_when_processing_errors(sdev))
875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
876 } 876 }
@@ -2625,15 +2625,15 @@ module_exit(exit_sd);
2625static void sd_print_sense_hdr(struct scsi_disk *sdkp, 2625static void sd_print_sense_hdr(struct scsi_disk *sdkp,
2626 struct scsi_sense_hdr *sshdr) 2626 struct scsi_sense_hdr *sshdr)
2627{ 2627{
2628 sd_printk(KERN_INFO, sdkp, ""); 2628 sd_printk(KERN_INFO, sdkp, " ");
2629 scsi_show_sense_hdr(sshdr); 2629 scsi_show_sense_hdr(sshdr);
2630 sd_printk(KERN_INFO, sdkp, ""); 2630 sd_printk(KERN_INFO, sdkp, " ");
2631 scsi_show_extd_sense(sshdr->asc, sshdr->ascq); 2631 scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
2632} 2632}
2633 2633
2634static void sd_print_result(struct scsi_disk *sdkp, int result) 2634static void sd_print_result(struct scsi_disk *sdkp, int result)
2635{ 2635{
2636 sd_printk(KERN_INFO, sdkp, ""); 2636 sd_printk(KERN_INFO, sdkp, " ");
2637 scsi_show_result(result); 2637 scsi_show_result(result);
2638} 2638}
2639 2639
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index a7bc8b7b09ac..2c3e89ddf069 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -72,10 +72,7 @@ static void sym_printl_hex(u_char *p, int n)
72 72
73static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) 73static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
74{ 74{
75 if (label) 75 sym_print_addr(cp->cmd, "%s: ", label);
76 sym_print_addr(cp->cmd, "%s: ", label);
77 else
78 sym_print_addr(cp->cmd, "");
79 76
80 spi_print_msg(msg); 77 spi_print_msg(msg);
81 printf("\n"); 78 printf("\n");
@@ -4558,7 +4555,8 @@ static void sym_int_sir(struct sym_hcb *np)
4558 switch (np->msgin [2]) { 4555 switch (np->msgin [2]) {
4559 case M_X_MODIFY_DP: 4556 case M_X_MODIFY_DP:
4560 if (DEBUG_FLAGS & DEBUG_POINTER) 4557 if (DEBUG_FLAGS & DEBUG_POINTER)
4561 sym_print_msg(cp, NULL, np->msgin); 4558 sym_print_msg(cp, "extended msg ",
4559 np->msgin);
4562 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 4560 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
4563 (np->msgin[5]<<8) + (np->msgin[6]); 4561 (np->msgin[5]<<8) + (np->msgin[6]);
4564 sym_modify_dp(np, tp, cp, tmp); 4562 sym_modify_dp(np, tp, cp, tmp);
@@ -4585,7 +4583,7 @@ static void sym_int_sir(struct sym_hcb *np)
4585 */ 4583 */
4586 case M_IGN_RESIDUE: 4584 case M_IGN_RESIDUE:
4587 if (DEBUG_FLAGS & DEBUG_POINTER) 4585 if (DEBUG_FLAGS & DEBUG_POINTER)
4588 sym_print_msg(cp, NULL, np->msgin); 4586 sym_print_msg(cp, "1 or 2 byte ", np->msgin);
4589 if (cp->host_flags & HF_SENSE) 4587 if (cp->host_flags & HF_SENSE)
4590 OUTL_DSP(np, SCRIPTA_BA(np, clrack)); 4588 OUTL_DSP(np, SCRIPTA_BA(np, clrack));
4591 else 4589 else
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 50441ffe8e38..2904aa044126 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
472 spin_unlock_irqrestore(&uap->port.lock, flags); 472 spin_unlock_irqrestore(&uap->port.lock, flags);
473} 473}
474 474
475static void pl010_set_ldisc(struct uart_port *port) 475static void pl010_set_ldisc(struct uart_port *port, int new)
476{ 476{
477 int line = port->line; 477 if (new == N_PPS) {
478
479 if (line >= port->state->port.tty->driver->num)
480 return;
481
482 if (port->state->port.tty->ldisc->ops->num == N_PPS) {
483 port->flags |= UPF_HARDPPS_CD; 478 port->flags |= UPF_HARDPPS_CD;
484 pl010_enable_ms(port); 479 pl010_enable_ms(port);
485 } else 480 } else
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index bc9af503907f..5dff45c76d32 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -27,6 +27,7 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/sysrq.h> 29#include <linux/sysrq.h>
30#include <linux/slab.h>
30#include <linux/serial_reg.h> 31#include <linux/serial_reg.h>
31#include <linux/circ_buf.h> 32#include <linux/circ_buf.h>
32#include <linux/delay.h> 33#include <linux/delay.h>
@@ -1423,7 +1424,6 @@ static void hsu_global_init(void)
1423 } 1424 }
1424 1425
1425 phsu = hsu; 1426 phsu = hsu;
1426
1427 hsu_debugfs_init(hsu); 1427 hsu_debugfs_init(hsu);
1428 return; 1428 return;
1429 1429
@@ -1435,18 +1435,20 @@ err_free_region:
1435 1435
1436static void serial_hsu_remove(struct pci_dev *pdev) 1436static void serial_hsu_remove(struct pci_dev *pdev)
1437{ 1437{
1438 struct hsu_port *hsu; 1438 void *priv = pci_get_drvdata(pdev);
1439 int i; 1439 struct uart_hsu_port *up;
1440 1440
1441 hsu = pci_get_drvdata(pdev); 1441 if (!priv)
1442 if (!hsu)
1443 return; 1442 return;
1444 1443
1445 for (i = 0; i < 3; i++) 1444 /* For port 0/1/2, priv is the address of uart_hsu_port */
1446 uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port); 1445 if (pdev->device != 0x081E) {
1446 up = priv;
1447 uart_remove_one_port(&serial_hsu_reg, &up->port);
1448 }
1447 1449
1448 pci_set_drvdata(pdev, NULL); 1450 pci_set_drvdata(pdev, NULL);
1449 free_irq(hsu->irq, hsu); 1451 free_irq(pdev->irq, priv);
1450 pci_disable_device(pdev); 1452 pci_disable_device(pdev);
1451} 1453}
1452 1454
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 8dedb266f143..c4399e23565a 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -500,6 +500,7 @@ static int __init mpc512x_psc_fifoc_init(void)
500 psc_fifoc = of_iomap(np, 0); 500 psc_fifoc = of_iomap(np, 0);
501 if (!psc_fifoc) { 501 if (!psc_fifoc) {
502 pr_err("%s: Can't map FIFOC\n", __func__); 502 pr_err("%s: Can't map FIFOC\n", __func__);
503 of_node_put(np);
503 return -ENODEV; 504 return -ENODEV;
504 } 505 }
505 506
diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c
index f6ad1ecbff79..51c15f58e01e 100644
--- a/drivers/serial/mrst_max3110.c
+++ b/drivers/serial/mrst_max3110.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/ioport.h> 31#include <linux/ioport.h>
32#include <linux/irq.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/console.h> 34#include <linux/console.h>
34#include <linux/sysrq.h> 35#include <linux/sysrq.h>
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 141c69554bd4..7d475b2a79e8 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -335,8 +335,6 @@ static int serial_probe(struct pcmcia_device *link)
335 info->p_dev = link; 335 info->p_dev = link;
336 link->priv = info; 336 link->priv = info;
337 337
338 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
339 link->resource[0]->end = 8;
340 link->conf.Attributes = CONF_ENABLE_IRQ; 338 link->conf.Attributes = CONF_ENABLE_IRQ;
341 if (do_sound) { 339 if (do_sound) {
342 link->conf.Attributes |= CONF_ENABLE_SPKR; 340 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -411,6 +409,27 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
411 409
412/*====================================================================*/ 410/*====================================================================*/
413 411
412static int pfc_config(struct pcmcia_device *p_dev)
413{
414 unsigned int port = 0;
415 struct serial_info *info = p_dev->priv;
416
417 if ((p_dev->resource[1]->end != 0) &&
418 (resource_size(p_dev->resource[1]) == 8)) {
419 port = p_dev->resource[1]->start;
420 info->slave = 1;
421 } else if ((info->manfid == MANFID_OSITECH) &&
422 (resource_size(p_dev->resource[0]) == 0x40)) {
423 port = p_dev->resource[0]->start + 0x28;
424 info->slave = 1;
425 }
426 if (info->slave)
427 return setup_serial(p_dev, info, port, p_dev->irq);
428
429 dev_warn(&p_dev->dev, "no usable port range found, giving up\n");
430 return -ENODEV;
431}
432
414static int simple_config_check(struct pcmcia_device *p_dev, 433static int simple_config_check(struct pcmcia_device *p_dev,
415 cistpl_cftable_entry_t *cf, 434 cistpl_cftable_entry_t *cf,
416 cistpl_cftable_entry_t *dflt, 435 cistpl_cftable_entry_t *dflt,
@@ -461,23 +480,8 @@ static int simple_config(struct pcmcia_device *link)
461 struct serial_info *info = link->priv; 480 struct serial_info *info = link->priv;
462 int i = -ENODEV, try; 481 int i = -ENODEV, try;
463 482
464 /* If the card is already configured, look up the port and irq */ 483 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
465 if (link->function_config) { 484 link->resource[0]->end = 8;
466 unsigned int port = 0;
467 if ((link->resource[1]->end != 0) &&
468 (resource_size(link->resource[1]) == 8)) {
469 port = link->resource[1]->end;
470 info->slave = 1;
471 } else if ((info->manfid == MANFID_OSITECH) &&
472 (resource_size(link->resource[0]) == 0x40)) {
473 port = link->resource[0]->start + 0x28;
474 info->slave = 1;
475 }
476 if (info->slave) {
477 return setup_serial(link, info, port,
478 link->irq);
479 }
480 }
481 485
482 /* First pass: look for a config entry that looks normal. 486 /* First pass: look for a config entry that looks normal.
483 * Two tries: without IO aliases, then with aliases */ 487 * Two tries: without IO aliases, then with aliases */
@@ -491,8 +495,7 @@ static int simple_config(struct pcmcia_device *link)
491 if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL)) 495 if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
492 goto found_port; 496 goto found_port;
493 497
494 printk(KERN_NOTICE 498 dev_warn(&link->dev, "no usable port range found, giving up\n");
495 "serial_cs: no usable port range found, giving up\n");
496 return -1; 499 return -1;
497 500
498found_port: 501found_port:
@@ -558,6 +561,7 @@ static int multi_config(struct pcmcia_device *link)
558 int i, base2 = 0; 561 int i, base2 = 0;
559 562
560 /* First, look for a generic full-sized window */ 563 /* First, look for a generic full-sized window */
564 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
561 link->resource[0]->end = info->multi * 8; 565 link->resource[0]->end = info->multi * 8;
562 if (pcmcia_loop_config(link, multi_config_check, &base2)) { 566 if (pcmcia_loop_config(link, multi_config_check, &base2)) {
563 /* If that didn't work, look for two windows */ 567 /* If that didn't work, look for two windows */
@@ -565,15 +569,14 @@ static int multi_config(struct pcmcia_device *link)
565 info->multi = 2; 569 info->multi = 2;
566 if (pcmcia_loop_config(link, multi_config_check_notpicky, 570 if (pcmcia_loop_config(link, multi_config_check_notpicky,
567 &base2)) { 571 &base2)) {
568 printk(KERN_NOTICE "serial_cs: no usable port range" 572 dev_warn(&link->dev, "no usable port range "
569 "found, giving up\n"); 573 "found, giving up\n");
570 return -ENODEV; 574 return -ENODEV;
571 } 575 }
572 } 576 }
573 577
574 if (!link->irq) 578 if (!link->irq)
575 dev_warn(&link->dev, 579 dev_warn(&link->dev, "no usable IRQ found, continuing...\n");
576 "serial_cs: no usable IRQ found, continuing...\n");
577 580
578 /* 581 /*
579 * Apply any configuration quirks. 582 * Apply any configuration quirks.
@@ -675,6 +678,7 @@ static int serial_config(struct pcmcia_device * link)
675 multifunction cards that ask for appropriate IO port ranges */ 678 multifunction cards that ask for appropriate IO port ranges */
676 if ((info->multi == 0) && 679 if ((info->multi == 0) &&
677 (link->has_func_id) && 680 (link->has_func_id) &&
681 (link->socket->pcmcia_pfc == 0) &&
678 ((link->func_id == CISTPL_FUNCID_MULTI) || 682 ((link->func_id == CISTPL_FUNCID_MULTI) ||
679 (link->func_id == CISTPL_FUNCID_SERIAL))) 683 (link->func_id == CISTPL_FUNCID_SERIAL)))
680 pcmcia_loop_config(link, serial_check_for_multi, info); 684 pcmcia_loop_config(link, serial_check_for_multi, info);
@@ -685,7 +689,13 @@ static int serial_config(struct pcmcia_device * link)
685 if (info->quirk && info->quirk->multi != -1) 689 if (info->quirk && info->quirk->multi != -1)
686 info->multi = info->quirk->multi; 690 info->multi = info->quirk->multi;
687 691
688 if (info->multi > 1) 692 dev_info(&link->dev,
693 "trying to set up [0x%04x:0x%04x] (pfc: %d, multi: %d, quirk: %p)\n",
694 link->manf_id, link->card_id,
695 link->socket->pcmcia_pfc, info->multi, info->quirk);
696 if (link->socket->pcmcia_pfc)
697 i = pfc_config(link);
698 else if (info->multi > 1)
689 i = multi_config(link); 699 i = multi_config(link);
690 else 700 else
691 i = simple_config(link); 701 i = simple_config(link);
@@ -704,7 +714,7 @@ static int serial_config(struct pcmcia_device * link)
704 return 0; 714 return 0;
705 715
706failed: 716failed:
707 dev_warn(&link->dev, "serial_cs: failed to initialize\n"); 717 dev_warn(&link->dev, "failed to initialize\n");
708 serial_remove(link); 718 serial_remove(link);
709 return -ENODEV; 719 return -ENODEV;
710} 720}
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index acd35d1ebd12..4c37c4e28647 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -503,8 +503,9 @@ static void giveback(struct pl022 *pl022)
503 msg->state = NULL; 503 msg->state = NULL;
504 if (msg->complete) 504 if (msg->complete)
505 msg->complete(msg->context); 505 msg->complete(msg->context);
506 /* This message is completed, so let's turn off the clock! */ 506 /* This message is completed, so let's turn off the clocks! */
507 clk_disable(pl022->clk); 507 clk_disable(pl022->clk);
508 amba_pclk_disable(pl022->adev);
508} 509}
509 510
510/** 511/**
@@ -1139,9 +1140,10 @@ static void pump_messages(struct work_struct *work)
1139 /* Setup the SPI using the per chip configuration */ 1140 /* Setup the SPI using the per chip configuration */
1140 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); 1141 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
1141 /* 1142 /*
1142 * We enable the clock here, then the clock will be disabled when 1143 * We enable the clocks here, then the clocks will be disabled when
1143 * giveback() is called in each method (poll/interrupt/DMA) 1144 * giveback() is called in each method (poll/interrupt/DMA)
1144 */ 1145 */
1146 amba_pclk_enable(pl022->adev);
1145 clk_enable(pl022->clk); 1147 clk_enable(pl022->clk);
1146 restore_state(pl022); 1148 restore_state(pl022);
1147 flush(pl022); 1149 flush(pl022);
@@ -1786,11 +1788,9 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1786 } 1788 }
1787 1789
1788 /* Disable SSP */ 1790 /* Disable SSP */
1789 clk_enable(pl022->clk);
1790 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 1791 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
1791 SSP_CR1(pl022->virtbase)); 1792 SSP_CR1(pl022->virtbase));
1792 load_ssp_default_config(pl022); 1793 load_ssp_default_config(pl022);
1793 clk_disable(pl022->clk);
1794 1794
1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", 1795 status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022",
1796 pl022); 1796 pl022);
@@ -1818,6 +1818,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id)
1818 goto err_spi_register; 1818 goto err_spi_register;
1819 } 1819 }
1820 dev_dbg(dev, "probe succeded\n"); 1820 dev_dbg(dev, "probe succeded\n");
1821 /* Disable the silicon block pclk and clock it when needed */
1822 amba_pclk_disable(adev);
1821 return 0; 1823 return 0;
1822 1824
1823 err_spi_register: 1825 err_spi_register:
@@ -1879,9 +1881,9 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state)
1879 return status; 1881 return status;
1880 } 1882 }
1881 1883
1882 clk_enable(pl022->clk); 1884 amba_pclk_enable(adev);
1883 load_ssp_default_config(pl022); 1885 load_ssp_default_config(pl022);
1884 clk_disable(pl022->clk); 1886 amba_pclk_disable(adev);
1885 dev_dbg(&adev->dev, "suspended\n"); 1887 dev_dbg(&adev->dev, "suspended\n");
1886 return 0; 1888 return 0;
1887} 1889}
@@ -1981,7 +1983,7 @@ static int __init pl022_init(void)
1981 return amba_driver_register(&pl022_driver); 1983 return amba_driver_register(&pl022_driver);
1982} 1984}
1983 1985
1984module_init(pl022_init); 1986subsys_initcall(pl022_init);
1985 1987
1986static void __exit pl022_exit(void) 1988static void __exit pl022_exit(void)
1987{ 1989{
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index d256cb00604c..56247853c298 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -181,10 +181,6 @@ static void flush(struct dw_spi *dws)
181 wait_till_not_busy(dws); 181 wait_till_not_busy(dws);
182} 182}
183 183
184static void null_cs_control(u32 command)
185{
186}
187
188static int null_writer(struct dw_spi *dws) 184static int null_writer(struct dw_spi *dws)
189{ 185{
190 u8 n_bytes = dws->n_bytes; 186 u8 n_bytes = dws->n_bytes;
@@ -322,7 +318,7 @@ static void giveback(struct dw_spi *dws)
322 struct spi_transfer, 318 struct spi_transfer,
323 transfer_list); 319 transfer_list);
324 320
325 if (!last_transfer->cs_change) 321 if (!last_transfer->cs_change && dws->cs_control)
326 dws->cs_control(MRST_SPI_DEASSERT); 322 dws->cs_control(MRST_SPI_DEASSERT);
327 323
328 msg->state = NULL; 324 msg->state = NULL;
@@ -396,6 +392,11 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
396static irqreturn_t dw_spi_irq(int irq, void *dev_id) 392static irqreturn_t dw_spi_irq(int irq, void *dev_id)
397{ 393{
398 struct dw_spi *dws = dev_id; 394 struct dw_spi *dws = dev_id;
395 u16 irq_status, irq_mask = 0x3f;
396
397 irq_status = dw_readw(dws, isr) & irq_mask;
398 if (!irq_status)
399 return IRQ_NONE;
399 400
400 if (!dws->cur_msg) { 401 if (!dws->cur_msg) {
401 spi_mask_intr(dws, SPI_INT_TXEI); 402 spi_mask_intr(dws, SPI_INT_TXEI);
@@ -544,13 +545,13 @@ static void pump_transfers(unsigned long data)
544 */ 545 */
545 if (dws->cs_control) { 546 if (dws->cs_control) {
546 if (dws->rx && dws->tx) 547 if (dws->rx && dws->tx)
547 chip->tmode = 0x00; 548 chip->tmode = SPI_TMOD_TR;
548 else if (dws->rx) 549 else if (dws->rx)
549 chip->tmode = 0x02; 550 chip->tmode = SPI_TMOD_RO;
550 else 551 else
551 chip->tmode = 0x01; 552 chip->tmode = SPI_TMOD_TO;
552 553
553 cr0 &= ~(0x3 << SPI_MODE_OFFSET); 554 cr0 &= ~SPI_TMOD_MASK;
554 cr0 |= (chip->tmode << SPI_TMOD_OFFSET); 555 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
555 } 556 }
556 557
@@ -699,9 +700,6 @@ static int dw_spi_setup(struct spi_device *spi)
699 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 700 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
700 if (!chip) 701 if (!chip)
701 return -ENOMEM; 702 return -ENOMEM;
702
703 chip->cs_control = null_cs_control;
704 chip->enable_dma = 0;
705 } 703 }
706 704
707 /* 705 /*
@@ -883,7 +881,7 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
883 dws->dma_inited = 0; 881 dws->dma_inited = 0;
884 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60); 882 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
885 883
886 ret = request_irq(dws->irq, dw_spi_irq, 0, 884 ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
887 "dw_spi", dws); 885 "dw_spi", dws);
888 if (ret < 0) { 886 if (ret < 0) {
889 dev_err(&master->dev, "can not get IRQ\n"); 887 dev_err(&master->dev, "can not get IRQ\n");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a9e5c79ae52a..b5a78a1f4421 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/cache.h> 24#include <linux/cache.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/of_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27#include <linux/mod_devicetable.h> 28#include <linux/mod_devicetable.h>
28#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
@@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
86 const struct spi_device *spi = to_spi_device(dev); 87 const struct spi_device *spi = to_spi_device(dev);
87 const struct spi_driver *sdrv = to_spi_driver(drv); 88 const struct spi_driver *sdrv = to_spi_driver(drv);
88 89
90 /* Attempt an OF style match */
91 if (of_driver_match_device(dev, drv))
92 return 1;
93
89 if (sdrv->id_table) 94 if (sdrv->id_table)
90 return !!spi_match_id(sdrv->id_table, spi); 95 return !!spi_match_id(sdrv->id_table, spi);
91 96
@@ -554,11 +559,9 @@ done:
554EXPORT_SYMBOL_GPL(spi_register_master); 559EXPORT_SYMBOL_GPL(spi_register_master);
555 560
556 561
557static int __unregister(struct device *dev, void *master_dev) 562static int __unregister(struct device *dev, void *null)
558{ 563{
559 /* note: before about 2.6.14-rc1 this would corrupt memory: */ 564 spi_unregister_device(to_spi_device(dev));
560 if (dev != master_dev)
561 spi_unregister_device(to_spi_device(dev));
562 return 0; 565 return 0;
563} 566}
564 567
@@ -576,8 +579,7 @@ void spi_unregister_master(struct spi_master *master)
576{ 579{
577 int dummy; 580 int dummy;
578 581
579 dummy = device_for_each_child(master->dev.parent, &master->dev, 582 dummy = device_for_each_child(&master->dev, NULL, __unregister);
580 __unregister);
581 device_unregister(&master->dev); 583 device_unregister(&master->dev);
582} 584}
583EXPORT_SYMBOL_GPL(spi_unregister_master); 585EXPORT_SYMBOL_GPL(spi_unregister_master);
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c
index e24a63498acb..63e51b011d50 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi_gpio.c
@@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev)
350 spi_gpio->bitbang.master = spi_master_get(master); 350 spi_gpio->bitbang.master = spi_master_get(master);
351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect; 351 spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
352 352
353 if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { 353 if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; 354 spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; 355 spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; 356 spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index d31b57f7baaf..1dd86b835cd8 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -408,11 +408,17 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
408 408
409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count; 409 xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
410 410
411 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); 411 if (mspi->rx_dma == mspi->dma_dummy_rx)
412 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
413 else
414 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
412 out_be16(&rx_bd->cbd_datlen, 0); 415 out_be16(&rx_bd->cbd_datlen, 0);
413 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); 416 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
414 417
415 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); 418 if (mspi->tx_dma == mspi->dma_dummy_tx)
419 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
420 else
421 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
416 out_be16(&tx_bd->cbd_datlen, xfer_len); 422 out_be16(&tx_bd->cbd_datlen, xfer_len);
417 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | 423 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
418 BD_SC_LAST); 424 BD_SC_LAST);
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 97365815a729..c3038da2648a 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -200,6 +200,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
200 val = readl(regs + S3C64XX_SPI_STATUS); 200 val = readl(regs + S3C64XX_SPI_STATUS);
201 } while (TX_FIFO_LVL(val, sci) && loops--); 201 } while (TX_FIFO_LVL(val, sci) && loops--);
202 202
203 if (loops == 0)
204 dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
205
203 /* Flush RxFIFO*/ 206 /* Flush RxFIFO*/
204 loops = msecs_to_loops(1); 207 loops = msecs_to_loops(1);
205 do { 208 do {
@@ -210,6 +213,9 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
210 break; 213 break;
211 } while (loops--); 214 } while (loops--);
212 215
216 if (loops == 0)
217 dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
218
213 val = readl(regs + S3C64XX_SPI_CH_CFG); 219 val = readl(regs + S3C64XX_SPI_CH_CFG);
214 val &= ~S3C64XX_SPI_CH_SW_RST; 220 val &= ~S3C64XX_SPI_CH_SW_RST;
215 writel(val, regs + S3C64XX_SPI_CH_CFG); 221 writel(val, regs + S3C64XX_SPI_CH_CFG);
@@ -320,16 +326,17 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
320 326
321 /* millisecs to xfer 'len' bytes @ 'cur_speed' */ 327 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
322 ms = xfer->len * 8 * 1000 / sdd->cur_speed; 328 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
323 ms += 5; /* some tolerance */ 329 ms += 10; /* some tolerance */
324 330
325 if (dma_mode) { 331 if (dma_mode) {
326 val = msecs_to_jiffies(ms) + 10; 332 val = msecs_to_jiffies(ms) + 10;
327 val = wait_for_completion_timeout(&sdd->xfer_completion, val); 333 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
328 } else { 334 } else {
335 u32 status;
329 val = msecs_to_loops(ms); 336 val = msecs_to_loops(ms);
330 do { 337 do {
331 val = readl(regs + S3C64XX_SPI_STATUS); 338 status = readl(regs + S3C64XX_SPI_STATUS);
332 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val); 339 } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
333 } 340 }
334 341
335 if (!val) 342 if (!val)
@@ -447,8 +454,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
447 writel(val, regs + S3C64XX_SPI_CLK_CFG); 454 writel(val, regs + S3C64XX_SPI_CLK_CFG);
448} 455}
449 456
450void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, 457static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
451 int size, enum s3c2410_dma_buffresult res) 458 int size, enum s3c2410_dma_buffresult res)
452{ 459{
453 struct s3c64xx_spi_driver_data *sdd = buf_id; 460 struct s3c64xx_spi_driver_data *sdd = buf_id;
454 unsigned long flags; 461 unsigned long flags;
@@ -467,8 +474,8 @@ void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
467 spin_unlock_irqrestore(&sdd->lock, flags); 474 spin_unlock_irqrestore(&sdd->lock, flags);
468} 475}
469 476
470void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, 477static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
471 int size, enum s3c2410_dma_buffresult res) 478 int size, enum s3c2410_dma_buffresult res)
472{ 479{
473 struct s3c64xx_spi_driver_data *sdd = buf_id; 480 struct s3c64xx_spi_driver_data *sdd = buf_id;
474 unsigned long flags; 481 unsigned long flags;
@@ -508,8 +515,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
508 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 515 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
509 516
510 if (xfer->tx_buf != NULL) { 517 if (xfer->tx_buf != NULL) {
511 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf, 518 xfer->tx_dma = dma_map_single(dev,
512 xfer->len, DMA_TO_DEVICE); 519 (void *)xfer->tx_buf, xfer->len,
520 DMA_TO_DEVICE);
513 if (dma_mapping_error(dev, xfer->tx_dma)) { 521 if (dma_mapping_error(dev, xfer->tx_dma)) {
514 dev_err(dev, "dma_map_single Tx failed\n"); 522 dev_err(dev, "dma_map_single Tx failed\n");
515 xfer->tx_dma = XFER_DMAADDR_INVALID; 523 xfer->tx_dma = XFER_DMAADDR_INVALID;
@@ -919,6 +927,13 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
919 return -ENODEV; 927 return -ENODEV;
920 } 928 }
921 929
930 sci = pdev->dev.platform_data;
931 if (!sci->src_clk_name) {
932 dev_err(&pdev->dev,
933 "Board init must call s3c64xx_spi_set_info()\n");
934 return -EINVAL;
935 }
936
922 /* Check for availability of necessary resource */ 937 /* Check for availability of necessary resource */
923 938
924 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 939 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -946,8 +961,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
946 return -ENOMEM; 961 return -ENOMEM;
947 } 962 }
948 963
949 sci = pdev->dev.platform_data;
950
951 platform_set_drvdata(pdev, master); 964 platform_set_drvdata(pdev, master);
952 965
953 sdd = spi_master_get_devdata(master); 966 sdd = spi_master_get_devdata(master);
@@ -1170,7 +1183,7 @@ static int __init s3c64xx_spi_init(void)
1170{ 1183{
1171 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe); 1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1172} 1185}
1173module_init(s3c64xx_spi_init); 1186subsys_initcall(s3c64xx_spi_init);
1174 1187
1175static void __exit s3c64xx_spi_exit(void) 1188static void __exit s3c64xx_spi_exit(void)
1176{ 1189{
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index baa8b05b9e8d..6e973a79aa25 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -30,7 +30,6 @@
30#include "hash.h" 30#include "hash.h"
31 31
32#include <linux/if_arp.h> 32#include <linux/if_arp.h>
33#include <linux/netfilter_bridge.h>
34 33
35#define MIN(x, y) ((x) < (y) ? (x) : (y)) 34#define MIN(x, y) ((x) < (y) ? (x) : (y))
36 35
@@ -431,11 +430,6 @@ out:
431 return NOTIFY_DONE; 430 return NOTIFY_DONE;
432} 431}
433 432
434static int batman_skb_recv_finish(struct sk_buff *skb)
435{
436 return NF_ACCEPT;
437}
438
439/* receive a packet with the batman ethertype coming on a hard 433/* receive a packet with the batman ethertype coming on a hard
440 * interface */ 434 * interface */
441int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 435int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
@@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
456 if (atomic_read(&module_state) != MODULE_ACTIVE) 450 if (atomic_read(&module_state) != MODULE_ACTIVE)
457 goto err_free; 451 goto err_free;
458 452
459 /* if netfilter/ebtables wants to block incoming batman
460 * packets then give them a chance to do so here */
461 ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL,
462 batman_skb_recv_finish);
463 if (ret != 1)
464 goto err_out;
465
466 /* packet should hold at least type and version */ 453 /* packet should hold at least type and version */
467 if (unlikely(skb_headlen(skb) < 2)) 454 if (unlikely(skb_headlen(skb) < 2))
468 goto err_free; 455 goto err_free;
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index 055edee7b4e4..da3c82e47bbd 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -29,7 +29,6 @@
29#include "vis.h" 29#include "vis.h"
30#include "aggregation.h" 30#include "aggregation.h"
31 31
32#include <linux/netfilter_bridge.h>
33 32
34static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
35 34
@@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb,
92 91
93 /* dev_queue_xmit() returns a negative result on error. However on 92 /* dev_queue_xmit() returns a negative result on error. However on
94 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 93 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
95 * (which is > 0). This will not be treated as an error. 94 * (which is > 0). This will not be treated as an error. */
96 * Also, if netfilter/ebtables wants to block outgoing batman
97 * packets then giving them a chance to do so here */
98 95
99 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 96 return dev_queue_xmit(skb);
100 dev_queue_xmit);
101send_skb_err: 97send_skb_err:
102 kfree_skb(skb); 98 kfree_skb(skb);
103 return NET_XMIT_DROP; 99 return NET_XMIT_DROP;
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579425b9..1b3060eb2921 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@ struct st_proto_s {
80extern long st_register(struct st_proto_s *); 80extern long st_register(struct st_proto_s *);
81extern long st_unregister(enum proto_type); 81extern long st_unregister(enum proto_type);
82 82
83extern struct platform_device *st_get_plat_device(void);
84#endif /* ST_H */ 83#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1db1ab..b85d8bfdf600 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
38#include "st_ll.h" 38#include "st_ll.h"
39#include "st.h" 39#include "st.h"
40 40
41#define VERBOSE
42/* strings to be used for rfkill entries and by 41/* strings to be used for rfkill entries and by
43 * ST Core to be used for sysfs debug entry 42 * ST Core to be used for sysfs debug entry
44 */ 43 */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
581 long err = 0; 580 long err = 0;
582 unsigned long flags = 0; 581 unsigned long flags = 0;
583 582
584 st_kim_ref(&st_gdata); 583 st_kim_ref(&st_gdata, 0);
585 pr_info("%s(%d) ", __func__, new_proto->type); 584 pr_info("%s(%d) ", __func__, new_proto->type);
586 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL 585 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
587 || new_proto->reg_complete_cb == NULL) { 586 || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
713 712
714 pr_debug("%s: %d ", __func__, type); 713 pr_debug("%s: %d ", __func__, type);
715 714
716 st_kim_ref(&st_gdata); 715 st_kim_ref(&st_gdata, 0);
717 if (type < ST_BT || type >= ST_MAX) { 716 if (type < ST_BT || type >= ST_MAX) {
718 pr_err(" protocol %d not supported", type); 717 pr_err(" protocol %d not supported", type);
719 return -EPROTONOSUPPORT; 718 return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
767#endif 766#endif
768 long len; 767 long len;
769 768
770 st_kim_ref(&st_gdata); 769 st_kim_ref(&st_gdata, 0);
771 if (unlikely(skb == NULL || st_gdata == NULL 770 if (unlikely(skb == NULL || st_gdata == NULL
772 || st_gdata->tty == NULL)) { 771 || st_gdata->tty == NULL)) {
773 pr_err("data/tty unavailable to perform write"); 772 pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
818 struct st_data_s *st_gdata; 817 struct st_data_s *st_gdata;
819 pr_info("%s ", __func__); 818 pr_info("%s ", __func__);
820 819
821 st_kim_ref(&st_gdata); 820 st_kim_ref(&st_gdata, 0);
822 st_gdata->tty = tty; 821 st_gdata->tty = tty;
823 tty->disc_data = st_gdata; 822 tty->disc_data = st_gdata;
824 823
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d149f5f..8601320a679e 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
117void st_core_exit(struct st_data_s *); 117void st_core_exit(struct st_data_s *);
118 118
119/* ask for reference from KIM */ 119/* ask for reference from KIM */
120void st_kim_ref(struct st_data_s **); 120void st_kim_ref(struct st_data_s **, int);
121 121
122#define GPS_STUB_TEST 122#define GPS_STUB_TEST
123#ifdef GPS_STUB_TEST 123#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7fdc4e6..9e99463f76e8 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = {
72 PROTO_ENTRY(ST_GPS, "GPS"), 72 PROTO_ENTRY(ST_GPS, "GPS"),
73}; 73};
74 74
75#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
76struct platform_device *st_kim_devices[MAX_ST_DEVICES];
75 77
76/**********************************************************************/ 78/**********************************************************************/
77/* internal functions */ 79/* internal functions */
78 80
79/** 81/**
82 * st_get_plat_device -
83 * function which returns the reference to the platform device
84 * requested by id. As of now only 1 such device exists (id=0)
85 * the context requesting for reference can get the id to be
86 * requested by a. The protocol driver which is registering or
87 * b. the tty device which is opened.
88 */
89static struct platform_device *st_get_plat_device(int id)
90{
91 return st_kim_devices[id];
92}
93
94/**
80 * validate_firmware_response - 95 * validate_firmware_response -
81 * function to return whether the firmware response was proper 96 * function to return whether the firmware response was proper
82 * in case of error don't complete so that waiting for proper 97 * in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
353 struct kim_data_s *kim_gdata; 368 struct kim_data_s *kim_gdata;
354 pr_info(" %s ", __func__); 369 pr_info(" %s ", __func__);
355 370
356 kim_pdev = st_get_plat_device(); 371 kim_pdev = st_get_plat_device(0);
357 kim_gdata = dev_get_drvdata(&kim_pdev->dev); 372 kim_gdata = dev_get_drvdata(&kim_pdev->dev);
358 373
359 if (kim_gdata->gpios[type] == -1) { 374 if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
574 * This would enable multiple such platform devices to exist 589 * This would enable multiple such platform devices to exist
575 * on a given platform 590 * on a given platform
576 */ 591 */
577void st_kim_ref(struct st_data_s **core_data) 592void st_kim_ref(struct st_data_s **core_data, int id)
578{ 593{
579 struct platform_device *pdev; 594 struct platform_device *pdev;
580 struct kim_data_s *kim_gdata; 595 struct kim_data_s *kim_gdata;
581 /* get kim_gdata reference from platform device */ 596 /* get kim_gdata reference from platform device */
582 pdev = st_get_plat_device(); 597 pdev = st_get_plat_device(id);
583 kim_gdata = dev_get_drvdata(&pdev->dev); 598 kim_gdata = dev_get_drvdata(&pdev->dev);
584 *core_data = kim_gdata->core_data; 599 *core_data = kim_gdata->core_data;
585} 600}
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
623 long *gpios = pdev->dev.platform_data; 638 long *gpios = pdev->dev.platform_data;
624 struct kim_data_s *kim_gdata; 639 struct kim_data_s *kim_gdata;
625 640
641 st_kim_devices[pdev->id] = pdev;
626 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); 642 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
627 if (!kim_gdata) { 643 if (!kim_gdata) {
628 pr_err("no mem to allocate"); 644 pr_err("no mem to allocate");
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 0142338bcafe..4bdb8362de82 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice,
766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); 766 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
767 767
768 768
769 if (param->u.wpa_associate.wpa_ie && 769 if (param->u.wpa_associate.wpa_ie_len) {
770 copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) 770 if (!param->u.wpa_associate.wpa_ie)
771 return -EINVAL; 771 return -EINVAL;
772 if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
773 return -EINVAL;
774 if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len))
775 return -EFAULT;
776 }
772 777
773 if (param->u.wpa_associate.mode == 1) 778 if (param->u.wpa_associate.mode == 1)
774 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; 779 pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449600e..9eed5b52d9de 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB runtime power management (suspend/resume and wakeup)" 94 bool "USB runtime power management (autosuspend) and wakeup"
95 depends on USB && PM_RUNTIME 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/control" file to enable or disable autosuspend for
99 peripherals and to enable or disable autosuspend (see 99 individual USB peripherals (see
100 Documentation/usb/power-management.txt for more details). 100 Documentation/usb/power-management.txt for more details).
101 101
102 Also, USB "remote wakeup" signaling is supported, whereby some 102 Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cdc..1e6ccef2cf0c 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
159int usb_register_dev(struct usb_interface *intf, 159int usb_register_dev(struct usb_interface *intf,
160 struct usb_class_driver *class_driver) 160 struct usb_class_driver *class_driver)
161{ 161{
162 int retval = -EINVAL; 162 int retval;
163 int minor_base = class_driver->minor_base; 163 int minor_base = class_driver->minor_base;
164 int minor = 0; 164 int minor;
165 char name[20]; 165 char name[20];
166 char *temp; 166 char *temp;
167 167
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
173 */ 173 */
174 minor_base = 0; 174 minor_base = 0;
175#endif 175#endif
176 intf->minor = -1;
177
178 dbg ("looking for a minor, starting at %d", minor_base);
179 176
180 if (class_driver->fops == NULL) 177 if (class_driver->fops == NULL)
181 goto exit; 178 return -EINVAL;
179 if (intf->minor >= 0)
180 return -EADDRINUSE;
181
182 retval = init_usb_class();
183 if (retval)
184 return retval;
185
186 dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
182 187
183 down_write(&minor_rwsem); 188 down_write(&minor_rwsem);
184 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { 189 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
186 continue; 191 continue;
187 192
188 usb_minors[minor] = class_driver->fops; 193 usb_minors[minor] = class_driver->fops;
189 194 intf->minor = minor;
190 retval = 0;
191 break; 195 break;
192 } 196 }
193 up_write(&minor_rwsem); 197 up_write(&minor_rwsem);
194 198 if (intf->minor < 0)
195 if (retval) 199 return -EXFULL;
196 goto exit;
197
198 retval = init_usb_class();
199 if (retval)
200 goto exit;
201
202 intf->minor = minor;
203 200
204 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
205 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
213 "%s", temp); 210 "%s", temp);
214 if (IS_ERR(intf->usb_dev)) { 211 if (IS_ERR(intf->usb_dev)) {
215 down_write(&minor_rwsem); 212 down_write(&minor_rwsem);
216 usb_minors[intf->minor] = NULL; 213 usb_minors[minor] = NULL;
214 intf->minor = -1;
217 up_write(&minor_rwsem); 215 up_write(&minor_rwsem);
218 retval = PTR_ERR(intf->usb_dev); 216 retval = PTR_ERR(intf->usb_dev);
219 } 217 }
220exit:
221 return retval; 218 return retval;
222} 219}
223EXPORT_SYMBOL_GPL(usb_register_dev); 220EXPORT_SYMBOL_GPL(usb_register_dev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 844683e50383..9f0ce7de0e36 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1802,6 +1802,7 @@ free_interfaces:
1802 intf->dev.groups = usb_interface_groups; 1802 intf->dev.groups = usb_interface_groups;
1803 intf->dev.dma_mask = dev->dev.dma_mask; 1803 intf->dev.dma_mask = dev->dev.dma_mask;
1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1805 intf->minor = -1;
1805 device_initialize(&intf->dev); 1806 device_initialize(&intf->dev);
1806 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1807 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1807 dev->bus->busnum, dev->devpath, 1808 dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 58b72d741d93..a1e8d273103f 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
119 ehci->broken_periodic = 1; 119 ehci->broken_periodic = 1;
120 ehci_info(ehci, "using broken periodic workaround\n"); 120 ehci_info(ehci, "using broken periodic workaround\n");
121 } 121 }
122 if (pdev->device == 0x0806 || pdev->device == 0x0811
123 || pdev->device == 0x0829) {
124 ehci_info(ehci, "disable lpm for langwell/penwell\n");
125 ehci->has_lpm = 0;
126 }
122 break; 127 break;
123 case PCI_VENDOR_ID_TDI: 128 case PCI_VENDOR_ID_TDI:
124 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 129 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d351b60..5ab5bb89bae3 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
322 index, transmit ? 'T' : 'R', cppi_ch); 322 index, transmit ? 'T' : 'R', cppi_ch);
323 cppi_ch->hw_ep = ep; 323 cppi_ch->hw_ep = ep;
324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; 324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
325 cppi_ch->channel.max_len = 0x7fffffff;
325 326
326 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); 327 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
327 return &cppi_ch->channel; 328 return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index c79a5e30d437..9e8639d4e862 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = {
195 195
196static int musb_test_mode_open(struct inode *inode, struct file *file) 196static int musb_test_mode_open(struct inode *inode, struct file *file)
197{ 197{
198 file->private_data = inode->i_private;
199
200 return single_open(file, musb_test_mode_show, inode->i_private); 198 return single_open(file, musb_test_mode_show, inode->i_private);
201} 199}
202 200
203static ssize_t musb_test_mode_write(struct file *file, 201static ssize_t musb_test_mode_write(struct file *file,
204 const char __user *ubuf, size_t count, loff_t *ppos) 202 const char __user *ubuf, size_t count, loff_t *ppos)
205{ 203{
206 struct musb *musb = file->private_data; 204 struct seq_file *s = file->private_data;
205 struct musb *musb = s->private;
207 u8 test = 0; 206 u8 test = 0;
208 char buf[18]; 207 char buf[18];
209 208
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870e957e..d065e23f123e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
300#ifndef CONFIG_MUSB_PIO_ONLY 300#ifndef CONFIG_MUSB_PIO_ONLY
301 if (is_dma_capable() && musb_ep->dma) { 301 if (is_dma_capable() && musb_ep->dma) {
302 struct dma_controller *c = musb->dma_controller; 302 struct dma_controller *c = musb->dma_controller;
303 size_t request_size;
304
305 /* setup DMA, then program endpoint CSR */
306 request_size = min_t(size_t, request->length - request->actual,
307 musb_ep->dma->max_len);
303 308
304 use_dma = (request->dma != DMA_ADDR_INVALID); 309 use_dma = (request->dma != DMA_ADDR_INVALID);
305 310
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
307 312
308#ifdef CONFIG_USB_INVENTRA_DMA 313#ifdef CONFIG_USB_INVENTRA_DMA
309 { 314 {
310 size_t request_size;
311
312 /* setup DMA, then program endpoint CSR */
313 request_size = min_t(size_t, request->length,
314 musb_ep->dma->max_len);
315 if (request_size < musb_ep->packet_sz) 315 if (request_size < musb_ep->packet_sz)
316 musb_ep->dma->desired_mode = 0; 316 musb_ep->dma->desired_mode = 0;
317 else 317 else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
373 use_dma = use_dma && c->channel_program( 373 use_dma = use_dma && c->channel_program(
374 musb_ep->dma, musb_ep->packet_sz, 374 musb_ep->dma, musb_ep->packet_sz,
375 0, 375 0,
376 request->dma, 376 request->dma + request->actual,
377 request->length); 377 request_size);
378 if (!use_dma) { 378 if (!use_dma) {
379 c->channel_release(musb_ep->dma); 379 c->channel_release(musb_ep->dma);
380 musb_ep->dma = NULL; 380 musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
386 use_dma = use_dma && c->channel_program( 386 use_dma = use_dma && c->channel_program(
387 musb_ep->dma, musb_ep->packet_sz, 387 musb_ep->dma, musb_ep->packet_sz,
388 request->zero, 388 request->zero,
389 request->dma, 389 request->dma + request->actual,
390 request->length); 390 request_size);
391#endif 391#endif
392 } 392 }
393#endif 393#endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
501 request->zero = 0; 501 request->zero = 0;
502 } 502 }
503 503
504 /* ... or if not, then complete it. */ 504 if (request->actual == request->length) {
505 musb_g_giveback(musb_ep, request, 0); 505 musb_g_giveback(musb_ep, request, 0);
506 506 request = musb_ep->desc ? next_request(musb_ep) : NULL;
507 /* 507 if (!request) {
508 * Kickstart next transfer if appropriate; 508 DBG(4, "%s idle now\n",
509 * the packet that just completed might not 509 musb_ep->end_point.name);
510 * be transmitted for hours or days. 510 return;
511 * REVISIT for double buffering... 511 }
512 * FIXME revisit for stalls too...
513 */
514 musb_ep_select(mbase, epnum);
515 csr = musb_readw(epio, MUSB_TXCSR);
516 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
517 return;
518
519 request = musb_ep->desc ? next_request(musb_ep) : NULL;
520 if (!request) {
521 DBG(4, "%s idle now\n",
522 musb_ep->end_point.name);
523 return;
524 } 512 }
525 } 513 }
526 514
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
568{ 556{
569 const u8 epnum = req->epnum; 557 const u8 epnum = req->epnum;
570 struct usb_request *request = &req->request; 558 struct usb_request *request = &req->request;
571 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 559 struct musb_ep *musb_ep;
572 void __iomem *epio = musb->endpoints[epnum].regs; 560 void __iomem *epio = musb->endpoints[epnum].regs;
573 unsigned fifo_count = 0; 561 unsigned fifo_count = 0;
574 u16 len = musb_ep->packet_sz; 562 u16 len;
575 u16 csr = musb_readw(epio, MUSB_RXCSR); 563 u16 csr = musb_readw(epio, MUSB_RXCSR);
564 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
565
566 if (hw_ep->is_shared_fifo)
567 musb_ep = &hw_ep->ep_in;
568 else
569 musb_ep = &hw_ep->ep_out;
570
571 len = musb_ep->packet_sz;
576 572
577 /* We shouldn't get here while DMA is active, but we do... */ 573 /* We shouldn't get here while DMA is active, but we do... */
578 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 574 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
647 */ 643 */
648 644
649 csr |= MUSB_RXCSR_DMAENAB; 645 csr |= MUSB_RXCSR_DMAENAB;
650#ifdef USE_MODE1
651 csr |= MUSB_RXCSR_AUTOCLEAR; 646 csr |= MUSB_RXCSR_AUTOCLEAR;
647#ifdef USE_MODE1
652 /* csr |= MUSB_RXCSR_DMAMODE; */ 648 /* csr |= MUSB_RXCSR_DMAMODE; */
653 649
654 /* this special sequence (enabling and then 650 /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
663 if (request->actual < request->length) { 659 if (request->actual < request->length) {
664 int transfer_size = 0; 660 int transfer_size = 0;
665#ifdef USE_MODE1 661#ifdef USE_MODE1
666 transfer_size = min(request->length, 662 transfer_size = min(request->length - request->actual,
667 channel->max_len); 663 channel->max_len);
668#else 664#else
669 transfer_size = len; 665 transfer_size = min(request->length - request->actual,
666 (unsigned)len);
670#endif 667#endif
671 if (transfer_size <= musb_ep->packet_sz) 668 if (transfer_size <= musb_ep->packet_sz)
672 musb_ep->dma->desired_mode = 0; 669 musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
740 u16 csr; 737 u16 csr;
741 struct usb_request *request; 738 struct usb_request *request;
742 void __iomem *mbase = musb->mregs; 739 void __iomem *mbase = musb->mregs;
743 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 740 struct musb_ep *musb_ep;
744 void __iomem *epio = musb->endpoints[epnum].regs; 741 void __iomem *epio = musb->endpoints[epnum].regs;
745 struct dma_channel *dma; 742 struct dma_channel *dma;
743 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
744
745 if (hw_ep->is_shared_fifo)
746 musb_ep = &hw_ep->ep_in;
747 else
748 musb_ep = &hw_ep->ep_out;
746 749
747 musb_ep_select(mbase, epnum); 750 musb_ep_select(mbase, epnum);
748 751
@@ -1081,7 +1084,7 @@ struct free_record {
1081/* 1084/*
1082 * Context: controller locked, IRQs blocked. 1085 * Context: controller locked, IRQs blocked.
1083 */ 1086 */
1084static void musb_ep_restart(struct musb *musb, struct musb_request *req) 1087void musb_ep_restart(struct musb *musb, struct musb_request *req)
1085{ 1088{
1086 DBG(3, "<== %s request %p len %u on hw_ep%d\n", 1089 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1087 req->tx ? "TX/IN" : "RX/OUT", 1090 req->tx ? "TX/IN" : "RX/OUT",
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b140325d82..572b1da7f2dc 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
105 105
106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); 106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
107 107
108extern void musb_ep_restart(struct musb *, struct musb_request *);
109
108#endif /* __MUSB_GADGET_H */ 110#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f3a358..6dd03f4c5f49 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@ __acquires(musb->lock)
261 ctrlrequest->wIndex & 0x0f; 261 ctrlrequest->wIndex & 0x0f;
262 struct musb_ep *musb_ep; 262 struct musb_ep *musb_ep;
263 struct musb_hw_ep *ep; 263 struct musb_hw_ep *ep;
264 struct musb_request *request;
264 void __iomem *regs; 265 void __iomem *regs;
265 int is_in; 266 int is_in;
266 u16 csr; 267 u16 csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
302 musb_writew(regs, MUSB_RXCSR, csr); 303 musb_writew(regs, MUSB_RXCSR, csr);
303 } 304 }
304 305
306 /* Maybe start the first request in the queue */
307 request = to_musb_request(
308 next_request(musb_ep));
309 if (!musb_ep->busy && request) {
310 DBG(3, "restarting the request\n");
311 musb_ep_restart(musb, request);
312 }
313
305 /* select ep0 again */ 314 /* select ep0 again */
306 musb_ep_select(mbase, 0); 315 musb_ep_select(mbase, 0);
307 } break; 316 } break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b1dff9..9e65c47cc98b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
660 660
661 qh->segsize = length; 661 qh->segsize = length;
662 662
663 /*
664 * Ensure the data reaches to main memory before starting
665 * DMA transfer
666 */
667 wmb();
668
663 if (!dma->channel_program(channel, pkt_size, mode, 669 if (!dma->channel_program(channel, pkt_size, mode,
664 urb->transfer_dma + offset, length)) { 670 urb->transfer_dma + offset, length)) {
665 dma->channel_release(channel); 671 dma->channel_release(channel);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 05aaac1c3861..0bc97698af15 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on)
347 } 347 }
348} 348}
349 349
350static void twl4030_phy_power(struct twl4030_usb *twl, int on) 350static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
351{ 351{
352 u8 pwr; 352 u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
353
354 if (on)
355 pwr &= ~PHY_PWR_PHYPWD;
356 else
357 pwr |= PHY_PWR_PHYPWD;
353 358
354 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); 359 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
360}
361
362static void twl4030_phy_power(struct twl4030_usb *twl, int on)
363{
355 if (on) { 364 if (on) {
356 regulator_enable(twl->usb3v1); 365 regulator_enable(twl->usb3v1);
357 regulator_enable(twl->usb1v8); 366 regulator_enable(twl->usb1v8);
@@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on)
365 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, 374 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0,
366 VUSB_DEDICATED2); 375 VUSB_DEDICATED2);
367 regulator_enable(twl->usb1v5); 376 regulator_enable(twl->usb1v5);
368 pwr &= ~PHY_PWR_PHYPWD; 377 __twl4030_phy_power(twl, 1);
369 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
370 twl4030_usb_write(twl, PHY_CLK_CTRL, 378 twl4030_usb_write(twl, PHY_CLK_CTRL,
371 twl4030_usb_read(twl, PHY_CLK_CTRL) | 379 twl4030_usb_read(twl, PHY_CLK_CTRL) |
372 (PHY_CLK_CTRL_CLOCKGATING_EN | 380 (PHY_CLK_CTRL_CLOCKGATING_EN |
373 PHY_CLK_CTRL_CLK32K_EN)); 381 PHY_CLK_CTRL_CLK32K_EN));
374 } else { 382 } else {
375 pwr |= PHY_PWR_PHYPWD; 383 __twl4030_phy_power(twl, 0);
376 WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
377 regulator_disable(twl->usb1v5); 384 regulator_disable(twl->usb1v5);
378 regulator_disable(twl->usb1v8); 385 regulator_disable(twl->usb1v8);
379 regulator_disable(twl->usb3v1); 386 regulator_disable(twl->usb3v1);
@@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off)
387 394
388 twl4030_phy_power(twl, 0); 395 twl4030_phy_power(twl, 0);
389 twl->asleep = 1; 396 twl->asleep = 1;
397 dev_dbg(twl->dev, "%s\n", __func__);
390} 398}
391 399
392static void twl4030_phy_resume(struct twl4030_usb *twl) 400static void __twl4030_phy_resume(struct twl4030_usb *twl)
393{ 401{
394 if (!twl->asleep)
395 return;
396
397 twl4030_phy_power(twl, 1); 402 twl4030_phy_power(twl, 1);
398 twl4030_i2c_access(twl, 1); 403 twl4030_i2c_access(twl, 1);
399 twl4030_usb_set_mode(twl, twl->usb_mode); 404 twl4030_usb_set_mode(twl, twl->usb_mode);
400 if (twl->usb_mode == T2_USB_MODE_ULPI) 405 if (twl->usb_mode == T2_USB_MODE_ULPI)
401 twl4030_i2c_access(twl, 0); 406 twl4030_i2c_access(twl, 0);
407}
408
409static void twl4030_phy_resume(struct twl4030_usb *twl)
410{
411 if (!twl->asleep)
412 return;
413 __twl4030_phy_resume(twl);
402 twl->asleep = 0; 414 twl->asleep = 0;
415 dev_dbg(twl->dev, "%s\n", __func__);
403} 416}
404 417
405static int twl4030_usb_ldo_init(struct twl4030_usb *twl) 418static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
@@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl)
408 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); 421 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY);
409 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); 422 twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY);
410 423
411 /* put VUSB3V1 LDO in active state */ 424 /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/
412 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); 425 /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/
413 426
414 /* input to VUSB3V1 LDO is from VBAT, not VBUS */ 427 /* input to VUSB3V1 LDO is from VBAT, not VBUS */
415 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); 428 twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1);
@@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
502 return IRQ_HANDLED; 515 return IRQ_HANDLED;
503} 516}
504 517
518static void twl4030_usb_phy_init(struct twl4030_usb *twl)
519{
520 int status;
521
522 status = twl4030_usb_linkstat(twl);
523 if (status >= 0) {
524 if (status == USB_EVENT_NONE) {
525 __twl4030_phy_power(twl, 0);
526 twl->asleep = 1;
527 } else {
528 __twl4030_phy_resume(twl);
529 twl->asleep = 0;
530 }
531
532 blocking_notifier_call_chain(&twl->otg.notifier, status,
533 twl->otg.gadget);
534 }
535 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
536}
537
505static int twl4030_set_suspend(struct otg_transceiver *x, int suspend) 538static int twl4030_set_suspend(struct otg_transceiver *x, int suspend)
506{ 539{
507 struct twl4030_usb *twl = xceiv_to_twl(x); 540 struct twl4030_usb *twl = xceiv_to_twl(x);
@@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
550 struct twl4030_usb_data *pdata = pdev->dev.platform_data; 583 struct twl4030_usb_data *pdata = pdev->dev.platform_data;
551 struct twl4030_usb *twl; 584 struct twl4030_usb *twl;
552 int status, err; 585 int status, err;
553 u8 pwr;
554 586
555 if (!pdata) { 587 if (!pdata) {
556 dev_dbg(&pdev->dev, "platform_data not available\n"); 588 dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
569 twl->otg.set_peripheral = twl4030_set_peripheral; 601 twl->otg.set_peripheral = twl4030_set_peripheral;
570 twl->otg.set_suspend = twl4030_set_suspend; 602 twl->otg.set_suspend = twl4030_set_suspend;
571 twl->usb_mode = pdata->usb_mode; 603 twl->usb_mode = pdata->usb_mode;
572 604 twl->asleep = 1;
573 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
574
575 twl->asleep = (pwr & PHY_PWR_PHYPWD);
576 605
577 /* init spinlock for workqueue */ 606 /* init spinlock for workqueue */
578 spin_lock_init(&twl->lock); 607 spin_lock_init(&twl->lock);
@@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
610 return status; 639 return status;
611 } 640 }
612 641
613 /* The IRQ handler just handles changes from the previous states 642 /* Power down phy or make it work according to
614 * of the ID and VBUS pins ... in probe() we must initialize that 643 * current link state.
615 * previous state. The easy way: fake an IRQ.
616 *
617 * REVISIT: a real IRQ might have happened already, if PREEMPT is
618 * enabled. Else the IRQ may not yet be configured or enabled,
619 * because of scheduling delays.
620 */ 644 */
621 twl4030_usb_irq(twl->irq, twl); 645 twl4030_usb_phy_init(twl);
622 646
623 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); 647 dev_info(&pdev->dev, "Initialized TWL4030 USB module\n");
624 return 0; 648 return 0;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 30922a7e3347..aa665817a272 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file,
2024 2024
2025 case TIOCGICOUNT: 2025 case TIOCGICOUNT:
2026 cnow = mos7720_port->icount; 2026 cnow = mos7720_port->icount;
2027
2028 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2029
2027 icount.cts = cnow.cts; 2030 icount.cts = cnow.cts;
2028 icount.dsr = cnow.dsr; 2031 icount.dsr = cnow.dsr;
2029 icount.rng = cnow.rng; 2032 icount.rng = cnow.rng;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 1c9b6e9b2386..1a42bc213799 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -2285,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file,
2285 case TIOCGICOUNT: 2285 case TIOCGICOUNT:
2286 cnow = mos7840_port->icount; 2286 cnow = mos7840_port->icount;
2287 smp_rmb(); 2287 smp_rmb();
2288
2289 memset(&icount, 0, sizeof(struct serial_icounter_struct));
2290
2288 icount.cts = cnow.cts; 2291 icount.cts = cnow.cts;
2289 icount.dsr = cnow.dsr; 2292 icount.dsr = cnow.dsr;
2290 icount.rng = cnow.rng; 2293 icount.rng = cnow.rng;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a7a2f9..72ab71fdf053 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
243 int r, nlogs = 0; 243 int r, nlogs = 0;
244 244
245 while (datalen > 0) { 245 while (datalen > 0) {
246 if (unlikely(headcount >= VHOST_NET_MAX_SG)) { 246 if (unlikely(seg >= UIO_MAXIOV)) {
247 r = -ENOBUFS; 247 r = -ENOBUFS;
248 goto err; 248 goto err;
249 } 249 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c579dcc9200c..344019774ddd 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -212,6 +212,45 @@ static int vhost_worker(void *data)
212 } 212 }
213} 213}
214 214
215/* Helper to allocate iovec buffers for all vqs. */
216static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
217{
218 int i;
219 for (i = 0; i < dev->nvqs; ++i) {
220 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
221 UIO_MAXIOV, GFP_KERNEL);
222 dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
223 GFP_KERNEL);
224 dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
225 UIO_MAXIOV, GFP_KERNEL);
226
227 if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
228 !dev->vqs[i].heads)
229 goto err_nomem;
230 }
231 return 0;
232err_nomem:
233 for (; i >= 0; --i) {
234 kfree(dev->vqs[i].indirect);
235 kfree(dev->vqs[i].log);
236 kfree(dev->vqs[i].heads);
237 }
238 return -ENOMEM;
239}
240
241static void vhost_dev_free_iovecs(struct vhost_dev *dev)
242{
243 int i;
244 for (i = 0; i < dev->nvqs; ++i) {
245 kfree(dev->vqs[i].indirect);
246 dev->vqs[i].indirect = NULL;
247 kfree(dev->vqs[i].log);
248 dev->vqs[i].log = NULL;
249 kfree(dev->vqs[i].heads);
250 dev->vqs[i].heads = NULL;
251 }
252}
253
215long vhost_dev_init(struct vhost_dev *dev, 254long vhost_dev_init(struct vhost_dev *dev,
216 struct vhost_virtqueue *vqs, int nvqs) 255 struct vhost_virtqueue *vqs, int nvqs)
217{ 256{
@@ -229,6 +268,9 @@ long vhost_dev_init(struct vhost_dev *dev,
229 dev->worker = NULL; 268 dev->worker = NULL;
230 269
231 for (i = 0; i < dev->nvqs; ++i) { 270 for (i = 0; i < dev->nvqs; ++i) {
271 dev->vqs[i].log = NULL;
272 dev->vqs[i].indirect = NULL;
273 dev->vqs[i].heads = NULL;
232 dev->vqs[i].dev = dev; 274 dev->vqs[i].dev = dev;
233 mutex_init(&dev->vqs[i].mutex); 275 mutex_init(&dev->vqs[i].mutex);
234 vhost_vq_reset(dev, dev->vqs + i); 276 vhost_vq_reset(dev, dev->vqs + i);
@@ -295,6 +337,10 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
295 if (err) 337 if (err)
296 goto err_cgroup; 338 goto err_cgroup;
297 339
340 err = vhost_dev_alloc_iovecs(dev);
341 if (err)
342 goto err_cgroup;
343
298 return 0; 344 return 0;
299err_cgroup: 345err_cgroup:
300 kthread_stop(worker); 346 kthread_stop(worker);
@@ -345,6 +391,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
345 fput(dev->vqs[i].call); 391 fput(dev->vqs[i].call);
346 vhost_vq_reset(dev, dev->vqs + i); 392 vhost_vq_reset(dev, dev->vqs + i);
347 } 393 }
394 vhost_dev_free_iovecs(dev);
348 if (dev->log_ctx) 395 if (dev->log_ctx)
349 eventfd_ctx_put(dev->log_ctx); 396 eventfd_ctx_put(dev->log_ctx);
350 dev->log_ctx = NULL; 397 dev->log_ctx = NULL;
@@ -858,11 +905,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
858 if (r < 0) 905 if (r < 0)
859 return r; 906 return r;
860 len -= l; 907 len -= l;
861 if (!len) 908 if (!len) {
909 if (vq->log_ctx)
910 eventfd_signal(vq->log_ctx, 1);
862 return 0; 911 return 0;
912 }
863 } 913 }
864 if (vq->log_ctx)
865 eventfd_signal(vq->log_ctx, 1);
866 /* Length written exceeds what we have stored. This is a bug. */ 914 /* Length written exceeds what we have stored. This is a bug. */
867 BUG(); 915 BUG();
868 return 0; 916 return 0;
@@ -946,7 +994,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
946 } 994 }
947 995
948 ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect, 996 ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
949 ARRAY_SIZE(vq->indirect)); 997 UIO_MAXIOV);
950 if (unlikely(ret < 0)) { 998 if (unlikely(ret < 0)) {
951 vq_err(vq, "Translation failure %d in indirect.\n", ret); 999 vq_err(vq, "Translation failure %d in indirect.\n", ret);
952 return ret; 1000 return ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index afd77295971c..edc892989992 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -15,11 +15,6 @@
15 15
16struct vhost_device; 16struct vhost_device;
17 17
18enum {
19 /* Enough place for all fragments, head, and virtio net header. */
20 VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
21};
22
23struct vhost_work; 18struct vhost_work;
24typedef void (*vhost_work_fn_t)(struct vhost_work *work); 19typedef void (*vhost_work_fn_t)(struct vhost_work *work);
25 20
@@ -93,12 +88,15 @@ struct vhost_virtqueue {
93 bool log_used; 88 bool log_used;
94 u64 log_addr; 89 u64 log_addr;
95 90
96 struct iovec indirect[VHOST_NET_MAX_SG]; 91 struct iovec iov[UIO_MAXIOV];
97 struct iovec iov[VHOST_NET_MAX_SG]; 92 /* hdr is used to store the virtio header.
98 struct iovec hdr[VHOST_NET_MAX_SG]; 93 * Since each iovec has >= 1 byte length, we never need more than
94 * header length entries to store the header. */
95 struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
96 struct iovec *indirect;
99 size_t vhost_hlen; 97 size_t vhost_hlen;
100 size_t sock_hlen; 98 size_t sock_hlen;
101 struct vring_used_elem heads[VHOST_NET_MAX_SG]; 99 struct vring_used_elem *heads;
102 /* We use a kind of RCU to access private pointer. 100 /* We use a kind of RCU to access private pointer.
103 * All readers access it from worker, which makes it possible to 101 * All readers access it from worker, which makes it possible to
104 * flush the vhost_work instead of synchronize_rcu. Therefore readers do 102 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
@@ -109,7 +107,7 @@ struct vhost_virtqueue {
109 void *private_data; 107 void *private_data;
110 /* Log write descriptors */ 108 /* Log write descriptors */
111 void __user *log_base; 109 void __user *log_base;
112 struct vhost_log log[VHOST_NET_MAX_SG]; 110 struct vhost_log *log;
113}; 111};
114 112
115struct vhost_dev { 113struct vhost_dev {
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 84f842331dfa..7ccc967831f0 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3508,7 +3508,7 @@ static void fbcon_exit(void)
3508 softback_buf = 0UL; 3508 softback_buf = 0UL;
3509 3509
3510 for (i = 0; i < FB_MAX; i++) { 3510 for (i = 0; i < FB_MAX; i++) {
3511 int pending; 3511 int pending = 0;
3512 3512
3513 mapped = 0; 3513 mapped = 0;
3514 info = registered_fb[i]; 3514 info = registered_fb[i];
@@ -3516,7 +3516,8 @@ static void fbcon_exit(void)
3516 if (info == NULL) 3516 if (info == NULL)
3517 continue; 3517 continue;
3518 3518
3519 pending = cancel_work_sync(&info->queue); 3519 if (info->queue.func)
3520 pending = cancel_work_sync(&info->queue);
3520 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : 3521 DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" :
3521 "no")); 3522 "no"));
3522 3523
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 815f84b07933..70477c2e4b61 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -13,7 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/screen_info.h> 14#include <linux/screen_info.h>
15#include <linux/dmi.h> 15#include <linux/dmi.h>
16 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 19static struct fb_var_screeninfo efifb_defined __devinitdata = {
@@ -39,17 +39,31 @@ enum {
39 M_I20, /* 20-Inch iMac */ 39 M_I20, /* 20-Inch iMac */
40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ 40 M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
41 M_I24, /* 24-Inch iMac */ 41 M_I24, /* 24-Inch iMac */
42 M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
43 M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
44 M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
42 M_MINI, /* Mac Mini */ 45 M_MINI, /* Mac Mini */
46 M_MINI_3_1, /* Mac Mini, 3,1th gen */
47 M_MINI_4_1, /* Mac Mini, 4,1th gen */
43 M_MB, /* MacBook */ 48 M_MB, /* MacBook */
44 M_MB_2, /* MacBook, 2nd rev. */ 49 M_MB_2, /* MacBook, 2nd rev. */
45 M_MB_3, /* MacBook, 3rd rev. */ 50 M_MB_3, /* MacBook, 3rd rev. */
51 M_MB_5_1, /* MacBook, 5th rev. */
52 M_MB_6_1, /* MacBook, 6th rev. */
53 M_MB_7_1, /* MacBook, 7th rev. */
46 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ 54 M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
47 M_MBA, /* MacBook Air */ 55 M_MBA, /* MacBook Air */
48 M_MBP, /* MacBook Pro */ 56 M_MBP, /* MacBook Pro */
49 M_MBP_2, /* MacBook Pro 2nd gen */ 57 M_MBP_2, /* MacBook Pro 2nd gen */
58 M_MBP_2_2, /* MacBook Pro 2,2nd gen */
50 M_MBP_SR, /* MacBook Pro (Santa Rosa) */ 59 M_MBP_SR, /* MacBook Pro (Santa Rosa) */
51 M_MBP_4, /* MacBook Pro, 4th gen */ 60 M_MBP_4, /* MacBook Pro, 4th gen */
52 M_MBP_5_1, /* MacBook Pro, 5,1th gen */ 61 M_MBP_5_1, /* MacBook Pro, 5,1th gen */
62 M_MBP_5_2, /* MacBook Pro, 5,2th gen */
63 M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
64 M_MBP_6_1, /* MacBook Pro, 6,1th gen */
65 M_MBP_6_2, /* MacBook Pro, 6,2th gen */
66 M_MBP_7_1, /* MacBook Pro, 7,1th gen */
53 M_UNKNOWN /* placeholder */ 67 M_UNKNOWN /* placeholder */
54}; 68};
55 69
@@ -64,14 +78,28 @@ static struct efifb_dmi_info {
64 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ 78 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */
65 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, 79 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 },
66 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ 80 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */
81 [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 },
82 [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 },
83 [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 },
67 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, 84 [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 },
85 [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 },
86 [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 },
68 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, 87 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 },
88 [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 },
89 [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 },
90 [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 },
69 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, 91 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 },
70 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, 92 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 },
71 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ 93 [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */
94 [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 },
72 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, 95 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 },
73 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, 96 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 },
74 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, 97 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 },
98 [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 },
99 [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 },
100 [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 },
101 [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 },
102 [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 },
75 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } 103 [M_UNKNOWN] = { NULL, 0, 0, 0, 0 }
76}; 104};
77 105
@@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
92 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), 120 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
93 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), 121 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
94 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), 122 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
123 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
124 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
125 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
95 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), 126 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
127 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
128 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
96 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), 129 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
97 /* At least one of these two will be right; maybe both? */ 130 /* At least one of these two will be right; maybe both? */
98 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), 131 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
@@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = {
101 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), 134 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
102 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), 135 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
103 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), 136 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
137 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
138 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
139 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
104 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), 140 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
105 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), 141 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
106 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), 142 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
143 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
107 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), 144 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
108 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), 145 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
109 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), 146 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
110 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), 147 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
111 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), 148 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
149 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
150 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
151 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
152 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
153 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
112 {}, 154 {},
113}; 155};
114 156
@@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id)
116{ 158{
117 struct efifb_dmi_info *info = id->driver_data; 159 struct efifb_dmi_info *info = id->driver_data;
118 if (info->base == 0) 160 if (info->base == 0)
119 return -ENODEV; 161 return 0;
120 162
121 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " 163 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p "
122 "(%dx%d, stride %d)\n", id->ident, 164 "(%dx%d, stride %d)\n", id->ident,
@@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id)
124 info->stride); 166 info->stride);
125 167
126 /* Trust the bootloader over the DMI tables */ 168 /* Trust the bootloader over the DMI tables */
127 if (screen_info.lfb_base == 0) 169 if (screen_info.lfb_base == 0) {
170#if defined(CONFIG_PCI)
171 struct pci_dev *dev = NULL;
172 int found_bar = 0;
173#endif
128 screen_info.lfb_base = info->base; 174 screen_info.lfb_base = info->base;
129 if (screen_info.lfb_linelength == 0)
130 screen_info.lfb_linelength = info->stride;
131 if (screen_info.lfb_width == 0)
132 screen_info.lfb_width = info->width;
133 if (screen_info.lfb_height == 0)
134 screen_info.lfb_height = info->height;
135 if (screen_info.orig_video_isVGA == 0)
136 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
137 175
138 return 0; 176#if defined(CONFIG_PCI)
177 /* make sure that the address in the table is actually on a
178 * VGA device's PCI BAR */
179
180 for_each_pci_dev(dev) {
181 int i;
182 if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
183 continue;
184 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
185 resource_size_t start, end;
186
187 start = pci_resource_start(dev, i);
188 if (start == 0)
189 break;
190 end = pci_resource_end(dev, i);
191 if (screen_info.lfb_base >= start &&
192 screen_info.lfb_base < end) {
193 found_bar = 1;
194 }
195 }
196 }
197 if (!found_bar)
198 screen_info.lfb_base = 0;
199#endif
200 }
201 if (screen_info.lfb_base) {
202 if (screen_info.lfb_linelength == 0)
203 screen_info.lfb_linelength = info->stride;
204 if (screen_info.lfb_width == 0)
205 screen_info.lfb_width = info->width;
206 if (screen_info.lfb_height == 0)
207 screen_info.lfb_height = info->height;
208 if (screen_info.orig_video_isVGA == 0)
209 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
210 } else {
211 screen_info.lfb_linelength = 0;
212 screen_info.lfb_width = 0;
213 screen_info.lfb_height = 0;
214 screen_info.orig_video_isVGA = 0;
215 return 0;
216 }
217 return 1;
139} 218}
140 219
141static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, 220static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index c91a7f70f7b0..a31a77ff6f3d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
298 * Set bit to enable graphics DMA. 298 * Set bit to enable graphics DMA.
299 */ 299 */
300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
301 x |= fbi->active ? 0x00000100 : 0; 301 x &= ~CFG_GRA_ENA_MASK;
302 fbi->active = 0; 302 x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
303 303
304 /* 304 /*
305 * If we are in a pseudo-color mode, we need to enable 305 * If we are in a pseudo-color mode, we need to enable
@@ -559,7 +559,7 @@ static struct fb_ops pxa168fb_ops = {
559 .fb_imageblit = cfb_imageblit, 559 .fb_imageblit = cfb_imageblit,
560}; 560};
561 561
562static int __init pxa168fb_init_mode(struct fb_info *info, 562static int __devinit pxa168fb_init_mode(struct fb_info *info,
563 struct pxa168fb_mach_info *mi) 563 struct pxa168fb_mach_info *mi)
564{ 564{
565 struct pxa168fb_info *fbi = info->par; 565 struct pxa168fb_info *fbi = info->par;
@@ -599,7 +599,7 @@ static int __init pxa168fb_init_mode(struct fb_info *info,
599 return ret; 599 return ret;
600} 600}
601 601
602static int __init pxa168fb_probe(struct platform_device *pdev) 602static int __devinit pxa168fb_probe(struct platform_device *pdev)
603{ 603{
604 struct pxa168fb_mach_info *mi; 604 struct pxa168fb_mach_info *mi;
605 struct fb_info *info = 0; 605 struct fb_info *info = 0;
@@ -792,7 +792,7 @@ static struct platform_driver pxa168fb_driver = {
792 .probe = pxa168fb_probe, 792 .probe = pxa168fb_probe,
793}; 793};
794 794
795static int __devinit pxa168fb_init(void) 795static int __init pxa168fb_init(void)
796{ 796{
797 return platform_driver_register(&pxa168fb_driver); 797 return platform_driver_register(&pxa168fb_driver);
798} 798}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 559bf1727a2b..b52f8e4ef1fd 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd,
1701 break; 1701 break;
1702 1702
1703 case FBIOGET_VBLANK: 1703 case FBIOGET_VBLANK:
1704
1705 memset(&sisvbblank, 0, sizeof(struct fb_vblank));
1706
1704 sisvbblank.count = 0; 1707 sisvbblank.count = 0;
1705 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); 1708 sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount);
1706 1709
diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c
index da03c074e32a..4d553d0b8d7a 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/via/ioctl.c
@@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg)
25{ 25{
26 struct viafb_ioctl_info viainfo; 26 struct viafb_ioctl_info viainfo;
27 27
28 memset(&viainfo, 0, sizeof(struct viafb_ioctl_info));
29
28 viainfo.viafb_id = VIAID; 30 viainfo.viafb_id = VIAID;
29 viainfo.vendor_id = PCI_VIA_VENDOR_ID; 31 viainfo.vendor_id = PCI_VIA_VENDOR_ID;
30 32
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b036677df8c4..24efd8ea41bb 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -213,11 +213,11 @@ config OMAP_WATCHDOG
213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer. 213 here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430/OMAP4430 watchdog timer.
214 214
215config PNX4008_WATCHDOG 215config PNX4008_WATCHDOG
216 tristate "PNX4008 Watchdog" 216 tristate "PNX4008 and LPC32XX Watchdog"
217 depends on ARCH_PNX4008 217 depends on ARCH_PNX4008 || ARCH_LPC32XX
218 help 218 help
219 Say Y here if to include support for the watchdog timer 219 Say Y here if to include support for the watchdog timer
220 in the PNX4008 processor. 220 in the PNX4008 or LPC32XX processor.
221 This driver can be built as a module by choosing M. The module 221 This driver can be built as a module by choosing M. The module
222 will be called pnx4008_wdt. 222 will be called pnx4008_wdt.
223 223
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
index 88c83aa57303..f31493e65b38 100644
--- a/drivers/watchdog/sb_wdog.c
+++ b/drivers/watchdog/sb_wdog.c
@@ -305,7 +305,7 @@ static int __init sbwdog_init(void)
305 if (ret) { 305 if (ret) {
306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n", 306 printk(KERN_ERR "%s: failed to request irq 1 - %d\n",
307 ident.identity, ret); 307 ident.identity, ret);
308 return ret; 308 goto out;
309 } 309 }
310 310
311 ret = misc_register(&sbwdog_miscdev); 311 ret = misc_register(&sbwdog_miscdev);
@@ -313,14 +313,20 @@ static int __init sbwdog_init(void)
313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", 313 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n",
314 ident.identity, 314 ident.identity,
315 timeout / 1000000, (timeout / 100000) % 10); 315 timeout / 1000000, (timeout / 100000) % 10);
316 } else 316 return 0;
317 free_irq(1, (void *)user_dog); 317 }
318 free_irq(1, (void *)user_dog);
319out:
320 unregister_reboot_notifier(&sbwdog_notifier);
321
318 return ret; 322 return ret;
319} 323}
320 324
321static void __exit sbwdog_exit(void) 325static void __exit sbwdog_exit(void)
322{ 326{
323 misc_deregister(&sbwdog_miscdev); 327 misc_deregister(&sbwdog_miscdev);
328 free_irq(1, (void *)user_dog);
329 unregister_reboot_notifier(&sbwdog_notifier);
324} 330}
325 331
326module_init(sbwdog_init); 332module_init(sbwdog_init);
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 458c499c1223..18cdeb4c4258 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -449,6 +449,9 @@ static __devinit int ts72xx_wdt_probe(struct platform_device *pdev)
449 wdt->pdev = pdev; 449 wdt->pdev = pdev;
450 mutex_init(&wdt->lock); 450 mutex_init(&wdt->lock);
451 451
452 /* make sure that the watchdog is disabled */
453 ts72xx_wdt_stop(wdt);
454
452 error = misc_register(&ts72xx_wdt_miscdev); 455 error = misc_register(&ts72xx_wdt_miscdev);
453 if (error) { 456 if (error) {
454 dev_err(&pdev->dev, "failed to register miscdev\n"); 457 dev_err(&pdev->dev, "failed to register miscdev\n");
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 29bac5118877..d409495876f1 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb)
755{ 755{
756 int ret = 0; 756 int ret = 0;
757 757
758 blocking_notifier_chain_register(&xenstore_chain, nb); 758 if (xenstored_ready > 0)
759 ret = nb->notifier_call(nb, 0, NULL);
760 else
761 blocking_notifier_chain_register(&xenstore_chain, nb);
759 762
760 return ret; 763 return ret;
761} 764}
@@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
769 772
770void xenbus_probe(struct work_struct *unused) 773void xenbus_probe(struct work_struct *unused)
771{ 774{
772 BUG_ON((xenstored_ready <= 0)); 775 xenstored_ready = 1;
773 776
774 /* Enumerate devices in xenstore and watch for changes. */ 777 /* Enumerate devices in xenstore and watch for changes. */
775 xenbus_probe_devices(&xenbus_frontend); 778 xenbus_probe_devices(&xenbus_frontend);
@@ -835,8 +838,8 @@ static int __init xenbus_init(void)
835 xen_store_evtchn = xen_start_info->store_evtchn; 838 xen_store_evtchn = xen_start_info->store_evtchn;
836 xen_store_mfn = xen_start_info->store_mfn; 839 xen_store_mfn = xen_start_info->store_mfn;
837 xen_store_interface = mfn_to_virt(xen_store_mfn); 840 xen_store_interface = mfn_to_virt(xen_store_mfn);
841 xenstored_ready = 1;
838 } 842 }
839 xenstored_ready = 1;
840 } 843 }
841 844
842 /* Initialize the interface to xenstore. */ 845 /* Initialize the interface to xenstore. */